From a24653cc5cca475962b27b71dcba29a4aa94d6ee Mon Sep 17 00:00:00 2001
From: Brian Kassouf
Date: Thu, 8 Apr 2021 09:43:39 -0700
Subject: [PATCH] Run a more strict formatter over the code (#11312)

* Update tooling
* Run gofumpt
* go mod vendor
---
 Makefile | 2 +-
 api/client.go | 50 +-
 api/client_test.go | 4 +-
 api/lifetime_watcher.go | 6 +-
 api/output_string.go | 4 +-
 api/plugin_helpers.go | 2 +-
 api/ssh_agent_test.go | 5 +-
 api/sys_audit.go | 2 -
 api/sys_auth.go | 10 +-
 api/sys_plugins.go | 2 -
 audit/format_json_test.go | 14 +-
 audit/format_jsonx_test.go | 10 +-
 builtin/audit/file/backend.go | 2 +-
 builtin/audit/file/backend_test.go | 6 +-
 builtin/credential/app-id/backend.go | 8 +-
 builtin/credential/app-id/path_login.go | 8 +-
 builtin/credential/approle/path_login.go | 5 +-
 builtin/credential/approle/path_role.go | 154 +-
 builtin/credential/approle/path_role_test.go | 1 -
 .../credential/approle/path_tidy_user_id.go | 9 +-
 builtin/credential/aws/backend.go | 3 +-
 builtin/credential/aws/backend_e2e_test.go | 5 +-
 builtin/credential/aws/backend_test.go | 23 +-
 builtin/credential/aws/cli.go | 1 -
 builtin/credential/aws/client.go | 5 +-
 .../credential/aws/path_config_certificate.go | 2 +
 .../credential/aws/path_config_identity.go | 12 +-
 builtin/credential/aws/path_config_sts.go | 1 +
 .../path_config_tidy_identity_accesslist.go | 3 +-
 .../aws/path_config_tidy_roletag_denylist.go | 3 +-
 builtin/credential/aws/path_login.go | 2 -
 builtin/credential/aws/path_role.go | 5 +-
 builtin/credential/aws/path_role_test.go | 3 -
 builtin/credential/cert/backend_test.go | 45 +-
 builtin/credential/cert/path_certs.go | 32 +-
 builtin/credential/cert/path_config.go | 2 +-
 builtin/credential/cert/path_crls.go | 7 +-
 builtin/credential/cert/path_login.go | 2 +-
 builtin/credential/github/path_config.go | 8 +-
 builtin/credential/github/path_login.go | 4 +-
 builtin/credential/ldap/backend.go | 1 -
 builtin/credential/ldap/backend_test.go | 5 +-
 builtin/credential/ldap/path_login.go | 4 +-
 builtin/credential/okta/backend_test.go | 1 -
 builtin/credential/okta/path_config.go | 18 +-
 builtin/credential/okta/path_groups.go | 1 -
 builtin/credential/okta/path_login.go | 2 -
 builtin/credential/okta/path_users.go | 6 +-
 builtin/credential/radius/backend_test.go | 2 +-
 builtin/credential/radius/path_config.go | 16 +-
 builtin/credential/radius/path_login.go | 6 +-
 builtin/credential/radius/path_users.go | 7 +-
 builtin/credential/token/cli.go | 1 -
 builtin/credential/userpass/backend_test.go | 17 +-
 builtin/credential/userpass/path_login.go | 4 +-
 .../credential/userpass/path_user_password.go | 4 +-
 .../credential/userpass/path_user_policies.go | 6 +-
 builtin/credential/userpass/path_users.go | 12 +-
 builtin/logical/aws/backend_test.go | 11 +-
 builtin/logical/aws/iam_policies.go | 2 +-
 builtin/logical/aws/path_config_lease.go | 5 +-
 builtin/logical/aws/path_config_root.go | 12 +-
 builtin/logical/aws/path_roles.go | 30 +-
 builtin/logical/aws/path_roles_test.go | 8 +-
 builtin/logical/aws/path_user.go | 6 +-
 builtin/logical/aws/secret_access_keys.go | 9 +-
 .../logical/aws/secret_access_keys_test.go | 2 -
 builtin/logical/cassandra/backend.go | 1 -
 .../cassandra/path_config_connection.go | 20 +-
 .../logical/cassandra/path_creds_create.go | 2 +-
 builtin/logical/cassandra/path_roles.go | 10 +-
 builtin/logical/cassandra/secret_creds.go | 4 +-
 builtin/logical/consul/backend_test.go | 1 -
 builtin/logical/consul/path_config.go | 12 +-
 builtin/logical/consul/path_roles.go | 16 +-
 builtin/logical/consul/path_token.go | 6 +-
builtin/logical/consul/secret_token.go | 2 +- builtin/logical/database/backend_test.go | 4 +- .../logical/database/dbplugin/plugin_test.go | 6 + .../database/path_config_connection.go | 14 +- builtin/logical/database/path_creds_create.go | 8 +- builtin/logical/database/path_roles.go | 8 +- .../database/path_rotate_credentials.go | 9 +- builtin/logical/database/version_wrapper.go | 4 +- .../logical/database/version_wrapper_test.go | 2 + builtin/logical/mongodb/backend_test.go | 11 +- builtin/logical/mongodb/path_config_lease.go | 1 - builtin/logical/mssql/backend_test.go | 1 - .../logical/mssql/path_config_connection.go | 7 +- builtin/logical/mssql/path_config_lease.go | 7 +- builtin/logical/mssql/path_creds_create.go | 2 +- builtin/logical/mssql/path_roles.go | 4 +- builtin/logical/mssql/secret_creds.go | 5 +- builtin/logical/mysql/backend_test.go | 5 +- .../logical/mysql/path_config_connection.go | 11 +- builtin/logical/mysql/path_config_lease.go | 5 +- builtin/logical/mysql/path_role_create.go | 2 +- builtin/logical/mysql/secret_creds.go | 4 +- builtin/logical/nomad/backend_test.go | 1 - builtin/logical/nomad/path_config_access.go | 12 +- builtin/logical/nomad/path_config_lease.go | 4 +- builtin/logical/nomad/path_creds_create.go | 2 +- builtin/logical/nomad/path_roles.go | 8 +- builtin/logical/nomad/secret_token.go | 2 +- builtin/logical/pki/backend_test.go | 49 +- builtin/logical/pki/cert_util.go | 25 +- builtin/logical/pki/cert_util_test.go | 4 +- builtin/logical/pki/crl_test.go | 2 +- builtin/logical/pki/path_config_ca.go | 2 +- builtin/logical/pki/path_config_crl.go | 4 +- builtin/logical/pki/path_config_urls.go | 6 +- builtin/logical/pki/path_fetch.go | 5 +- builtin/logical/pki/path_intermediate.go | 2 +- builtin/logical/pki/path_issue_sign.go | 1 - builtin/logical/pki/path_revoke.go | 2 +- builtin/logical/pki/path_roles.go | 80 +- builtin/logical/pki/path_root.go | 2 +- builtin/logical/pki/path_tidy.go | 10 +- builtin/logical/pki/secret_certs.go | 6 +- builtin/logical/postgresql/backend_test.go | 3 - .../postgresql/path_config_connection.go | 10 +- .../logical/postgresql/path_config_lease.go | 5 +- .../logical/postgresql/path_role_create.go | 2 +- builtin/logical/postgresql/secret_creds.go | 4 +- builtin/logical/rabbitmq/backend_test.go | 1 - .../rabbitmq/path_config_connection.go | 10 +- builtin/logical/rabbitmq/path_config_lease.go | 4 +- builtin/logical/rabbitmq/path_role_create.go | 2 +- builtin/logical/rabbitmq/path_roles.go | 8 +- builtin/logical/rabbitmq/secret_creds.go | 4 +- builtin/logical/ssh/backend_test.go | 46 +- builtin/logical/ssh/communicator.go | 2 +- builtin/logical/ssh/path_config_ca.go | 6 +- .../logical/ssh/path_config_zeroaddress.go | 2 +- builtin/logical/ssh/path_creds_create.go | 6 +- builtin/logical/ssh/path_keys.go | 4 +- builtin/logical/ssh/path_lookup.go | 2 +- builtin/logical/ssh/path_roles.go | 56 +- builtin/logical/ssh/path_sign.go | 16 +- builtin/logical/ssh/path_verify.go | 2 +- builtin/logical/ssh/secret_dynamic_key.go | 4 +- builtin/logical/ssh/secret_otp.go | 2 +- builtin/logical/totp/backend_test.go | 13 +- builtin/logical/totp/path_code.go | 5 +- builtin/logical/totp/path_keys.go | 16 +- builtin/logical/transit/backend.go | 1 - builtin/logical/transit/backend_test.go | 13 +- builtin/logical/transit/path_backup.go | 8 +- builtin/logical/transit/path_cache_config.go | 2 +- builtin/logical/transit/path_config.go | 12 +- builtin/logical/transit/path_datakey.go | 12 +- builtin/logical/transit/path_decrypt.go | 8 +- 
builtin/logical/transit/path_decrypt_test.go | 3 +- builtin/logical/transit/path_export.go | 6 +- builtin/logical/transit/path_export_test.go | 2 +- builtin/logical/transit/path_hash.go | 8 +- builtin/logical/transit/path_hmac.go | 15 +- builtin/logical/transit/path_keys.go | 14 +- builtin/logical/transit/path_random.go | 6 +- builtin/logical/transit/path_random_test.go | 1 - builtin/logical/transit/path_restore.go | 12 +- builtin/logical/transit/path_rewrap.go | 10 +- builtin/logical/transit/path_rotate.go | 2 +- builtin/logical/transit/path_sign_verify.go | 4 +- builtin/logical/transit/path_trim.go | 4 +- builtin/plugin/backend.go | 2 - builtin/plugin/backend_lazyLoad_test.go | 6 +- command/agent.go | 11 +- command/agent/approle_end_to_end_test.go | 34 +- command/agent/auth/azure/azure.go | 1 - .../agent/auth/kubernetes/kubernetes_test.go | 1 - ...auto_auth_preload_token_end_to_end_test.go | 4 +- command/agent/aws_end_to_end_test.go | 1 - command/agent/cache/api_proxy_test.go | 5 +- command/agent/cache/cache_test.go | 2 +- command/agent/cache/cacheboltdb/bolt.go | 4 +- command/agent/cache/cacheboltdb/bolt_test.go | 3 +- command/agent/cache/cachememdb/cache_memdb.go | 16 +- command/agent/cache/cachememdb/index_test.go | 1 - command/agent/cache/keymanager/passthrough.go | 1 - command/agent/cache/lease_cache_test.go | 3 - command/agent/cache/listener.go | 1 - command/agent/cache_end_to_end_test.go | 4 +- command/agent/cert_end_to_end_test.go | 3 +- command/agent/config/config_test.go | 28 +- command/agent/jwt_end_to_end_test.go | 6 +- command/agent/sink/file/file_sink.go | 2 +- command/agent/sink/file/file_sink_test.go | 6 +- command/agent/sink/file/sink_test.go | 2 +- command/agent/template/template_test.go | 22 +- command/agent_test.go | 13 +- command/audit_disable.go | 6 +- command/audit_enable.go | 6 +- command/audit_list.go | 10 +- command/auth_disable.go | 6 +- command/auth_enable.go | 6 +- command/auth_help.go | 6 +- command/auth_list.go | 6 +- command/auth_tune.go | 6 +- command/base_flags.go | 2 +- command/base_predict.go | 6 +- command/commands.go | 8 +- command/debug.go | 22 +- command/debug_test.go | 2 +- command/delete.go | 6 +- command/format_test.go | 1 + command/kv_delete.go | 6 +- command/kv_destroy.go | 6 +- command/kv_enable_versioning.go | 6 +- command/kv_get.go | 6 +- command/kv_list.go | 8 +- command/kv_metadata_delete.go | 6 +- command/kv_metadata_get.go | 6 +- command/kv_metadata_put.go | 6 +- command/kv_patch.go | 6 +- command/kv_put.go | 6 +- command/kv_rollback.go | 6 +- command/kv_test.go | 1 - command/kv_undelete.go | 6 +- command/lease_lookup.go | 6 +- command/lease_renew.go | 6 +- command/lease_revoke.go | 6 +- command/list.go | 8 +- command/monitor.go | 6 +- command/namespace_create.go | 6 +- command/namespace_delete.go | 6 +- command/namespace_list.go | 6 +- command/namespace_lookup.go | 6 +- command/operator_diagnose.go | 18 +- command/operator_generate_root.go | 6 +- command/operator_init.go | 6 +- command/operator_key_status.go | 6 +- command/operator_migrate.go | 7 +- command/operator_migrate_test.go | 5 +- command/operator_raft_autopilot_get_config.go | 6 +- command/operator_raft_autopilot_set_config.go | 6 +- command/operator_raft_autopilot_state.go | 6 +- command/operator_raft_join.go | 6 +- command/operator_raft_listpeers.go | 6 +- command/operator_raft_remove_peer.go | 6 +- command/operator_raft_snapshot_restore.go | 6 +- command/operator_raft_snapshot_save.go | 15 +- command/operator_rekey.go | 6 +- command/operator_seal.go | 6 +- 
command/operator_step_down.go | 6 +- command/operator_unseal.go | 6 +- command/operator_usage.go | 7 +- command/path_help.go | 6 +- command/pgp_test.go | 8 +- command/plugin_deregister.go | 6 +- command/plugin_info.go | 6 +- command/plugin_list.go | 6 +- command/plugin_register.go | 6 +- command/plugin_reload.go | 6 +- command/plugin_reload_status.go | 10 +- command/plugin_reload_test.go | 2 - command/plugin_test.go | 2 +- command/policy_delete.go | 6 +- command/policy_fmt.go | 8 +- command/policy_list.go | 6 +- command/policy_read.go | 6 +- command/policy_write.go | 6 +- command/print.go | 6 +- command/print_token.go | 6 +- command/read.go | 6 +- command/rotate.go | 6 +- command/secrets_disable.go | 6 +- command/secrets_enable.go | 6 +- command/secrets_enable_test.go | 8 +- command/secrets_list.go | 6 +- command/secrets_move.go | 6 +- command/secrets_tune.go | 6 +- command/server.go | 18 +- command/server/config_test_helpers.go | 9 +- command/server/config_util.go | 3 +- command/server/listener.go | 7 +- command/server/listener_tcp_test.go | 2 - .../server/server_seal_transit_acc_test.go | 6 +- command/server_profile.go | 2 +- command/server_test.go | 12 +- command/server_util.go | 4 +- command/ssh.go | 12 +- command/ssh_test.go | 5 +- command/status.go | 6 +- command/token/helper_internal.go | 2 +- command/token/helper_internal_test.go | 4 +- command/token_capabilities.go | 6 +- command/token_create.go | 6 +- command/token_lookup.go | 6 +- command/token_renew.go | 6 +- command/token_revoke.go | 6 +- command/unwrap.go | 6 +- command/version.go | 6 +- command/write.go | 6 +- go.mod | 3 +- go.sum | 14 + helper/dhutil/dhutil.go | 3 +- helper/forwarding/util.go | 2 +- helper/metricsutil/bucket.go | 1 - helper/metricsutil/gauge_process.go | 4 +- helper/metricsutil/gauge_process_test.go | 1 - helper/metricsutil/metricsutil.go | 5 +- helper/metricsutil/wrapped_metrics.go | 6 +- helper/metricsutil/wrapped_metrics_test.go | 1 - helper/mfa/duo/path_duo_access.go | 6 +- helper/mfa/duo/path_duo_config.go | 6 +- helper/mfa/mfa_test.go | 2 +- helper/mfa/path_mfa_config.go | 2 +- helper/pgpkeys/flag.go | 1 - helper/pgpkeys/flag_test.go | 16 +- helper/policies/policies.go | 4 +- helper/random/serializing.go | 2 +- helper/testhelpers/azurite/azurite.go | 6 +- helper/testhelpers/docker/testhelpers.go | 2 +- helper/testhelpers/ldap/ldaphelper.go | 3 +- helper/testhelpers/minio/miniohelper.go | 6 +- helper/testhelpers/mongodb/mongodbhelper.go | 1 - helper/testhelpers/mysql/mysqlhelper.go | 1 - .../postgresql/postgresqlhelper.go | 3 +- helper/testhelpers/testhelpers.go | 8 +- helper/testhelpers/teststorage/teststorage.go | 4 +- .../teststorage/teststorage_reusable.go | 5 +- helper/xor/xor.go | 2 +- http/auth_token_test.go | 2 - http/forwarding_bench_test.go | 2 +- http/forwarding_test.go | 14 +- http/handler.go | 1 - http/handler_test.go | 2 - http/logical_test.go | 2 +- http/plugin_test.go | 1 - http/sys_config_cors_test.go | 1 - http/sys_config_state_test.go | 1 - http/sys_health.go | 1 - http/sys_health_test.go | 2 - http/sys_leader.go | 3 +- http/sys_monitor_test.go | 1 - http/sys_mount_test.go | 1 - http/sys_seal_test.go | 1 - internalshared/configutil/config_util.go | 3 +- internalshared/configutil/encrypt_decrypt.go | 2 +- .../configutil/encrypt_decrypt_test.go | 2 + internalshared/configutil/telemetry.go | 1 - internalshared/listenerutil/listener_test.go | 1 - internalshared/reloadutil/reload_test.go | 4 +- physical/aerospike/aerospike_test.go | 1 - physical/alicloudoss/alicloudoss.go | 1 - 
physical/alicloudoss/alicloudoss_test.go | 2 +- physical/cassandra/cassandra.go | 5 +- physical/cassandra/cassandra_test.go | 11 +- physical/cockroachdb/cockroachdb_test.go | 1 - physical/cockroachdb/keywords.go | 866 ++++++----- physical/consul/consul.go | 10 +- physical/couchdb/couchdb.go | 8 +- physical/dynamodb/dynamodb.go | 10 +- physical/dynamodb/dynamodb_test.go | 4 +- physical/etcd/etcd2.go | 8 +- physical/etcd/etcd3.go | 9 +- physical/foundationdb/foundationdb.go | 23 +- physical/gcs/gcs_ha.go | 6 +- physical/manta/manta.go | 4 +- physical/mssql/mssql_test.go | 2 - physical/mysql/mysql.go | 11 +- physical/mysql/mysql_test.go | 2 +- physical/oci/oci_ha.go | 6 +- physical/postgresql/postgresql.go | 6 +- physical/postgresql/postgresql_test.go | 8 +- physical/raft/chunking_test.go | 1 - physical/raft/fsm.go | 14 +- physical/raft/fsm_test.go | 2 +- physical/raft/logstore/bolt_store.go | 2 +- physical/raft/raft.go | 25 +- physical/raft/raft_autopilot.go | 2 +- physical/raft/raft_test.go | 10 +- physical/raft/snapshot.go | 8 +- physical/raft/snapshot_test.go | 3 +- physical/s3/s3.go | 3 - physical/s3/s3_test.go | 2 +- physical/spanner/spanner.go | 6 +- physical/spanner/spanner_ha.go | 6 +- physical/swift/swift.go | 7 +- physical/zookeeper/zookeeper.go | 8 +- physical/zookeeper/zookeeper_test.go | 2 - plugins/database/hana/hana.go | 1 - .../database/influxdb/connection_producer.go | 2 +- plugins/database/influxdb/influxdb_test.go | 3 +- .../mongodb/connection_producer_test.go | 8 +- plugins/database/mssql/mssql.go | 1 + .../mysql/connection_producer_test.go | 10 +- plugins/database/mysql/mysql_test.go | 9 +- plugins/database/postgresql/postgresql.go | 1 - .../database/postgresql/postgresql_test.go | 15 +- plugins/database/redshift/redshift.go | 3 +- sdk/database/dbplugin/client.go | 5 +- sdk/database/dbplugin/grpc_transport.go | 6 +- sdk/database/dbplugin/plugin.go | 6 +- sdk/database/dbplugin/server.go | 4 +- sdk/database/dbplugin/v5/conversions_test.go | 1 + .../dbplugin/v5/grpc_database_plugin.go | 6 +- sdk/database/dbplugin/v5/grpc_server_test.go | 6 +- sdk/database/dbplugin/v5/plugin_client.go | 2 +- sdk/database/dbplugin/v5/plugin_server.go | 2 +- sdk/database/helper/connutil/connutil.go | 4 +- sdk/database/helper/credsutil/credsutil.go | 3 +- sdk/database/helper/dbutil/dbutil_test.go | 1 - sdk/framework/backend.go | 6 +- sdk/framework/backend_test.go | 33 +- sdk/framework/field_data_test.go | 124 +- sdk/framework/filter.go | 3 +- sdk/framework/openapi.go | 24 +- sdk/framework/openapi_test.go | 4 +- sdk/framework/path.go | 2 +- sdk/framework/path_map.go | 6 +- sdk/framework/path_struct_test.go | 2 +- sdk/framework/path_test.go | 1 - sdk/framework/policy_map.go | 2 +- sdk/helper/awsutil/generate_credentials.go | 5 +- sdk/helper/awsutil/region_test.go | 5 +- sdk/helper/base62/base62.go | 6 +- sdk/helper/certutil/helpers.go | 2 +- sdk/helper/certutil/types.go | 12 +- sdk/helper/compressutil/compress_test.go | 21 +- sdk/helper/consts/replication.go | 7 +- sdk/helper/dbtxn/dbtxn.go | 3 - sdk/helper/identitytpl/templating.go | 1 - sdk/helper/identitytpl/templating_test.go | 6 +- sdk/helper/jsonutil/json_test.go | 2 +- sdk/helper/kdf/kdf_test.go | 13 +- .../keysutil/encrypted_key_storage_test.go | 1 - sdk/helper/keysutil/lock_manager.go | 4 +- sdk/helper/keysutil/policy.go | 2 +- sdk/helper/keysutil/policy_test.go | 6 +- sdk/helper/ldaputil/client_test.go | 6 +- sdk/helper/logging/logging.go | 1 - sdk/helper/logging/logging_test.go | 10 +- sdk/helper/password/password_test.go | 2 +- 
sdk/helper/pathmanager/pathmanager_test.go | 20 +- sdk/helper/pluginutil/run_config_test.go | 16 +- sdk/helper/policyutil/policyutil.go | 4 +- sdk/helper/strutil/strutil_test.go | 91 +- sdk/helper/tokenutil/tokenutil.go | 18 +- sdk/logical/storage_test.go | 3 - sdk/logical/storage_view.go | 4 +- sdk/physical/cache.go | 10 +- sdk/physical/encoding.go | 12 +- sdk/physical/error.go | 6 +- sdk/physical/file/file.go | 12 +- sdk/physical/file/file_test.go | 4 +- sdk/physical/inmem/cache_test.go | 1 - sdk/physical/inmem/inmem.go | 14 +- sdk/physical/latency.go | 6 +- sdk/physical/physical_view.go | 4 +- sdk/physical/testing.go | 10 +- sdk/plugin/backend.go | 6 +- sdk/plugin/grpc_backend_client.go | 6 +- sdk/plugin/grpc_system_test.go | 2 +- sdk/plugin/logger.go | 2 - sdk/plugin/logger_test.go | 4 +- sdk/plugin/mock/path_errors.go | 9 +- sdk/plugin/mock/path_internal.go | 2 +- sdk/plugin/mock/path_kv.go | 10 +- sdk/plugin/mock/path_raw.go | 1 - sdk/plugin/mock/path_special.go | 1 - sdk/plugin/pb/translation_test.go | 20 +- sdk/plugin/plugin.go | 4 +- sdk/plugin/serve.go | 4 +- sdk/plugin/storage_test.go | 1 - .../environments/docker/environment.go | 16 +- sdk/testing/stepwise/helpers.go | 3 +- sdk/testing/stepwise/stepwise_test.go | 2 +- .../consul/consul_service_registration.go | 4 +- .../consul_service_registration_test.go | 8 +- .../kubernetes/client/cmd/kubeclient/main.go | 10 +- shamir/shamir.go | 2 +- shamir/shamir_test.go | 2 +- shamir/tables.go | 6 +- tools/tools.go | 27 +- vault/acl.go | 2 +- vault/acl_test.go | 28 +- vault/activity/query_test.go | 7 +- vault/activity_log.go | 2 - vault/activity_log_test.go | 27 +- vault/activity_log_testing_util.go | 7 +- vault/audit.go | 6 +- vault/audit_test.go | 21 +- vault/audited_headers_test.go | 34 +- vault/auth_test.go | 12 +- vault/barrier_aes_gcm.go | 1 - vault/barrier_aes_gcm_test.go | 4 +- vault/barrier_view_test.go | 36 +- vault/cluster/inmem_layer.go | 1 + vault/cluster_test.go | 4 +- vault/core.go | 4 +- vault/core_test.go | 19 +- vault/core_util.go | 7 +- vault/counters.go | 2 - vault/counters_test.go | 2 +- vault/diagnose/tls.go | 1 - vault/dynamic_system_view_test.go | 3 + vault/expiration.go | 5 +- vault/expiration_integ_test.go | 1 - vault/expiration_test.go | 11 +- .../external_tests/identity/entities_test.go | 1 - .../metrics/core_metrics_int_test.go | 1 - vault/external_tests/misc/recovery_test.go | 2 +- .../policy/acl_templating_test.go | 2 +- vault/external_tests/quotas/quotas_test.go | 20 +- vault/external_tests/raft/raft_test.go | 15 +- vault/external_tests/raftha/raft_ha_test.go | 2 +- .../response/allowed_response_headers_test.go | 4 +- .../sealmigration/seal_migration_test.go | 1 - .../sealmigration/testshared.go | 38 +- vault/external_tests/token/token_test.go | 2 +- vault/ha.go | 3 +- vault/ha_test.go | 12 +- vault/identity_store_entities.go | 2 +- vault/identity_store_entities_test.go | 1 - vault/identity_store_oidc.go | 27 +- vault/identity_store_oidc_test.go | 18 +- vault/identity_store_schema.go | 26 +- vault/identity_store_structs.go | 6 +- vault/identity_store_test.go | 1 - vault/identity_store_util.go | 5 +- vault/init.go | 2 +- vault/logical_passthrough.go | 2 +- vault/logical_raw.go | 22 +- vault/logical_system.go | 8 +- vault/logical_system_activity.go | 4 +- vault/logical_system_paths.go | 290 ++-- vault/logical_system_raft.go | 1 - vault/logical_system_test.go | 7 +- vault/mount.go | 60 +- vault/mount_test.go | 14 +- vault/namespaces.go | 4 +- vault/plugin_catalog.go | 1 - vault/plugin_catalog_test.go | 7 +- 
vault/policy.go | 20 +- vault/policy_store_test.go | 2 +- vault/policy_test.go | 12 +- vault/quotas/quotas_util.go | 3 +- vault/rekey.go | 4 +- vault/request_handling.go | 6 +- vault/request_handling_test.go | 2 - vault/rollback.go | 1 - vault/rollback_test.go | 2 +- vault/router.go | 16 +- vault/seal.go | 3 +- vault/seal_autoseal.go | 1 - vault/sealunwrapper.go | 6 +- vault/testing.go | 48 +- vault/token_store.go | 57 +- vault/token_store_test.go | 3 +- vault/wrapping.go | 2 +- .../github.com/hashicorp/vault/api/client.go | 50 +- .../hashicorp/vault/api/lifetime_watcher.go | 6 +- .../hashicorp/vault/api/output_string.go | 4 +- .../hashicorp/vault/api/plugin_helpers.go | 2 +- .../hashicorp/vault/api/sys_audit.go | 2 - .../hashicorp/vault/api/sys_auth.go | 10 +- .../hashicorp/vault/api/sys_plugins.go | 2 - .../vault/sdk/database/dbplugin/client.go | 5 +- .../sdk/database/dbplugin/grpc_transport.go | 6 +- .../vault/sdk/database/dbplugin/plugin.go | 6 +- .../vault/sdk/database/dbplugin/server.go | 4 +- .../dbplugin/v5/grpc_database_plugin.go | 6 +- .../sdk/database/dbplugin/v5/plugin_client.go | 2 +- .../sdk/database/dbplugin/v5/plugin_server.go | 2 +- .../sdk/database/helper/connutil/connutil.go | 4 +- .../database/helper/credsutil/credsutil.go | 3 +- .../hashicorp/vault/sdk/framework/backend.go | 6 +- .../hashicorp/vault/sdk/framework/filter.go | 3 +- .../hashicorp/vault/sdk/framework/openapi.go | 24 +- .../hashicorp/vault/sdk/framework/path.go | 2 +- .../hashicorp/vault/sdk/framework/path_map.go | 6 +- .../vault/sdk/framework/policy_map.go | 2 +- .../helper/awsutil/generate_credentials.go | 5 +- .../vault/sdk/helper/base62/base62.go | 6 +- .../vault/sdk/helper/certutil/helpers.go | 2 +- .../vault/sdk/helper/certutil/types.go | 12 +- .../vault/sdk/helper/consts/replication.go | 7 +- .../hashicorp/vault/sdk/helper/dbtxn/dbtxn.go | 3 - .../sdk/helper/identitytpl/templating.go | 1 - .../vault/sdk/helper/keysutil/lock_manager.go | 4 +- .../vault/sdk/helper/keysutil/policy.go | 2 +- .../vault/sdk/helper/logging/logging.go | 1 - .../vault/sdk/helper/policyutil/policyutil.go | 4 +- .../vault/sdk/helper/tokenutil/tokenutil.go | 18 +- .../vault/sdk/logical/storage_view.go | 4 +- .../hashicorp/vault/sdk/physical/cache.go | 10 +- .../hashicorp/vault/sdk/physical/encoding.go | 12 +- .../hashicorp/vault/sdk/physical/error.go | 6 +- .../hashicorp/vault/sdk/physical/file/file.go | 12 +- .../vault/sdk/physical/inmem/inmem.go | 14 +- .../hashicorp/vault/sdk/physical/latency.go | 6 +- .../vault/sdk/physical/physical_view.go | 4 +- .../hashicorp/vault/sdk/physical/testing.go | 10 +- .../hashicorp/vault/sdk/plugin/backend.go | 6 +- .../vault/sdk/plugin/grpc_backend_client.go | 6 +- .../hashicorp/vault/sdk/plugin/logger.go | 2 - .../vault/sdk/plugin/mock/path_errors.go | 9 +- .../vault/sdk/plugin/mock/path_internal.go | 2 +- .../vault/sdk/plugin/mock/path_kv.go | 10 +- .../vault/sdk/plugin/mock/path_raw.go | 1 - .../vault/sdk/plugin/mock/path_special.go | 1 - .../hashicorp/vault/sdk/plugin/plugin.go | 4 +- .../hashicorp/vault/sdk/plugin/serve.go | 4 +- .../environments/docker/environment.go | 16 +- .../vault/sdk/testing/stepwise/helpers.go | 3 +- vendor/golang.org/x/mod/module/module.go | 80 +- vendor/golang.org/x/mod/semver/semver.go | 3 + .../x/tools/cmd/goimports/goimports.go | 13 +- .../x/tools/go/analysis/analysis.go | 13 +- vendor/golang.org/x/tools/go/analysis/doc.go | 30 +- .../x/tools/go/analysis/validate.go | 33 +- .../x/tools/go/gcexportdata/gcexportdata.go | 12 +- 
.../x/tools/go/internal/gcimporter/bexport.go | 852 +++++++++++ .../x/tools/go/internal/gcimporter/bimport.go | 1039 +++++++++++++ .../go/internal/gcimporter/gcimporter.go | 13 +- .../x/tools/go/internal/gcimporter/iexport.go | 20 - .../x/tools/go/internal/gcimporter/iimport.go | 170 --- .../tools/go/internal/packagesdriver/sizes.go | 78 +- .../x/tools/go/packages/external.go | 2 +- .../golang.org/x/tools/go/packages/golist.go | 347 ++++- .../x/tools/go/packages/golist_overlay.go | 236 ++- .../x/tools/go/packages/packages.go | 61 +- .../internal/analysisinternal/analysis.go | 343 ++++- .../x/tools/internal/event/core/event.go | 2 +- .../x/tools/internal/gocommand/invoke.go | 196 ++- .../x/tools/internal/gocommand/vendor.go | 102 ++ .../x/tools/internal/gocommand/version.go | 40 + .../x/tools/internal/gopathwalk/walk.go | 11 - .../x/tools/internal/imports/fix.go | 319 ++-- .../x/tools/internal/imports/imports.go | 93 +- .../x/tools/internal/imports/mod.go | 158 +- .../x/tools/internal/imports/sortimports.go | 20 +- .../x/tools/internal/imports/zstdlib.go | 52 + .../x/tools/internal/lsp/fuzzy/input.go | 168 ++ .../x/tools/internal/lsp/fuzzy/matcher.go | 398 +++++ .../internal/packagesinternal/packages.go | 5 + .../tools/internal/typesinternal/errorcode.go | 1358 +++++++++++++++++ .../typesinternal/errorcode_string.go | 152 ++ .../x/tools/internal/typesinternal/types.go | 45 + vendor/modules.txt | 15 +- vendor/mvdan.cc/gofumpt/.gitattributes | 2 + vendor/mvdan.cc/gofumpt/CHANGELOG.md | 35 + vendor/mvdan.cc/gofumpt/LICENSE | 27 + vendor/mvdan.cc/gofumpt/LICENSE.google | 27 + vendor/mvdan.cc/gofumpt/README.md | 417 +++++ vendor/mvdan.cc/gofumpt/doc.go | 14 + vendor/mvdan.cc/gofumpt/flag.go | 16 + vendor/mvdan.cc/gofumpt/format/format.go | 703 +++++++++ vendor/mvdan.cc/gofumpt/go.mod | 11 + vendor/mvdan.cc/gofumpt/go.sum | 40 + vendor/mvdan.cc/gofumpt/gofmt.go | 323 ++++ vendor/mvdan.cc/gofumpt/internal.go | 176 +++ vendor/mvdan.cc/gofumpt/internal/diff/diff.go | 58 + vendor/mvdan.cc/gofumpt/rewrite.go | 309 ++++ vendor/mvdan.cc/gofumpt/simplify.go | 165 ++ vendor/mvdan.cc/gofumpt/version.go | 28 + 658 files changed, 10961 insertions(+), 3671 deletions(-) create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go create mode 100644 vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go create mode 100644 vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types.go create mode 100644 vendor/mvdan.cc/gofumpt/.gitattributes create mode 100644 vendor/mvdan.cc/gofumpt/CHANGELOG.md create mode 100644 vendor/mvdan.cc/gofumpt/LICENSE create mode 100644 vendor/mvdan.cc/gofumpt/LICENSE.google create mode 100644 vendor/mvdan.cc/gofumpt/README.md create mode 100644 vendor/mvdan.cc/gofumpt/doc.go create mode 100644 vendor/mvdan.cc/gofumpt/flag.go create mode 100644 vendor/mvdan.cc/gofumpt/format/format.go create mode 100644 vendor/mvdan.cc/gofumpt/go.mod create mode 100644 vendor/mvdan.cc/gofumpt/go.sum create mode 100644 vendor/mvdan.cc/gofumpt/gofmt.go create mode 100644 vendor/mvdan.cc/gofumpt/internal.go create mode 100644 
vendor/mvdan.cc/gofumpt/internal/diff/diff.go create mode 100644 vendor/mvdan.cc/gofumpt/rewrite.go create mode 100644 vendor/mvdan.cc/gofumpt/simplify.go create mode 100644 vendor/mvdan.cc/gofumpt/version.go diff --git a/Makefile b/Makefile index ed05c013b5..582c050fff 100644 --- a/Makefile +++ b/Makefile @@ -210,7 +210,7 @@ fmtcheck: #@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" fmt: - goimports -w $(GOFMT_FILES) + find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gofumpt -w assetcheck: @echo "==> Checking compiled UI assets..." diff --git a/api/client.go b/api/client.go index d548009768..ce5f7798b2 100644 --- a/api/client.go +++ b/api/client.go @@ -25,26 +25,30 @@ import ( "golang.org/x/time/rate" ) -const EnvVaultAddress = "VAULT_ADDR" -const EnvVaultAgentAddr = "VAULT_AGENT_ADDR" -const EnvVaultCACert = "VAULT_CACERT" -const EnvVaultCAPath = "VAULT_CAPATH" -const EnvVaultClientCert = "VAULT_CLIENT_CERT" -const EnvVaultClientKey = "VAULT_CLIENT_KEY" -const EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" -const EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" -const EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" -const EnvVaultNamespace = "VAULT_NAMESPACE" -const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" -const EnvVaultWrapTTL = "VAULT_WRAP_TTL" -const EnvVaultMaxRetries = "VAULT_MAX_RETRIES" -const EnvVaultToken = "VAULT_TOKEN" -const EnvVaultMFA = "VAULT_MFA" -const EnvRateLimit = "VAULT_RATE_LIMIT" +const ( + EnvVaultAddress = "VAULT_ADDR" + EnvVaultAgentAddr = "VAULT_AGENT_ADDR" + EnvVaultCACert = "VAULT_CACERT" + EnvVaultCAPath = "VAULT_CAPATH" + EnvVaultClientCert = "VAULT_CLIENT_CERT" + EnvVaultClientKey = "VAULT_CLIENT_KEY" + EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" + EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" + EnvVaultNamespace = "VAULT_NAMESPACE" + EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" + EnvVaultWrapTTL = "VAULT_WRAP_TTL" + EnvVaultMaxRetries = "VAULT_MAX_RETRIES" + EnvVaultToken = "VAULT_TOKEN" + EnvVaultMFA = "VAULT_MFA" + EnvRateLimit = "VAULT_RATE_LIMIT" +) // Deprecated values -const EnvVaultAgentAddress = "VAULT_AGENT_ADDR" -const EnvVaultInsecure = "VAULT_SKIP_VERIFY" +const ( + EnvVaultAgentAddress = "VAULT_AGENT_ADDR" + EnvVaultInsecure = "VAULT_SKIP_VERIFY" +) // WrappingLookupFunc is a function that, given an HTTP verb and a path, // returns an optional string duration to be used for response wrapping (e.g. @@ -359,7 +363,6 @@ func (c *Config) ReadEnvironment() error { } func parseRateLimit(val string) (rate float64, burst int, err error) { - _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) if err != nil { rate, err = strconv.ParseFloat(val, 64) @@ -370,7 +373,6 @@ func parseRateLimit(val string) (rate float64, burst int, err error) { } return rate, burst, err - } // Client is the client to the Vault API. Create a client with NewClient. 
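The hunks above show the main rewrites gofumpt applies throughout the rest of this patch: adjacent const/var/type declarations are grouped into blocks, "var x = y" becomes "x := y", file modes use 0o-prefixed octal literals, and repeated element types are elided from composite literals. A minimal sketch of that style, using a hypothetical file and invented identifiers rather than code from this change:

// example_gofumpt.go (illustrative only; these identifiers do not exist in Vault)
package example

import "os"

// Previously written as repeated single-line statements
// (const envAddr = ...; const envToken = ...); gofumpt groups
// adjacent declarations into one block.
const (
	envAddr  = "EXAMPLE_ADDR"
	envToken = "EXAMPLE_TOKEN"
)

func writeAudit(path string, data []byte) error {
	// "var mode = os.FileMode(0600)" becomes a short declaration with a 0o literal.
	mode := os.FileMode(0o600)

	// `"foo": []string{"bar"}` becomes `"foo": {"bar"}` inside the composite literal.
	headers := map[string][]string{
		"foo": {"bar"},
	}
	_ = headers

	return os.WriteFile(path, data, mode)
}

Running the updated "make fmt" target applies these rewrites tree-wide while skipping generated *.pb.go files and the vendor directory, per the Makefile hunk above.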
@@ -793,7 +795,7 @@ func (c *Client) NewRequest(method, requestPath string) *Request { policyOverride := c.policyOverride c.modifyLock.RUnlock() - var host = addr.Host + host := addr.Host // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV // record and take the highest match; this is not designed for high-availability, just discovery // Internet Draft specifies that the SRV record is ignored if a port is given @@ -985,8 +987,10 @@ START: return result, nil } -type RequestCallback func(*Request) -type ResponseCallback func(*Response) +type ( + RequestCallback func(*Request) + ResponseCallback func(*Response) +) // WithRequestCallbacks makes a shallow clone of Client, modifies it to use // the given callbacks, and returns it. Each of the callbacks will be invoked diff --git a/api/client_test.go b/api/client_test.go index 155edebdfe..87900856af 100644 --- a/api/client_test.go +++ b/api/client_test.go @@ -369,8 +369,8 @@ func TestParsingRateOnly(t *testing.T) { } func TestParsingErrorCase(t *testing.T) { - var incorrectFormat = "foobar" - var _, _, err = parseRateLimit(incorrectFormat) + incorrectFormat := "foobar" + _, _, err := parseRateLimit(incorrectFormat) if err == nil { t.Error("Expected error, found no error") } diff --git a/api/lifetime_watcher.go b/api/lifetime_watcher.go index 841c51c094..79e4c20433 100644 --- a/api/lifetime_watcher.go +++ b/api/lifetime_watcher.go @@ -380,5 +380,7 @@ func (r *LifetimeWatcher) calculateGrace(leaseDuration time.Duration) { r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax)) } -type Renewer = LifetimeWatcher -type RenewerInput = LifetimeWatcherInput +type ( + Renewer = LifetimeWatcher + RenewerInput = LifetimeWatcherInput +) diff --git a/api/output_string.go b/api/output_string.go index b836b77a5a..5979c56567 100644 --- a/api/output_string.go +++ b/api/output_string.go @@ -11,9 +11,7 @@ const ( ErrOutputStringRequest = "output a string, please" ) -var ( - LastOutputStringError *OutputStringError -) +var LastOutputStringError *OutputStringError type OutputStringError struct { *retryablehttp.Request diff --git a/api/plugin_helpers.go b/api/plugin_helpers.go index 3aa4e6e46a..c2978b388f 100644 --- a/api/plugin_helpers.go +++ b/api/plugin_helpers.go @@ -82,7 +82,7 @@ func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) } - var allClaims = make(map[string]interface{}) + allClaims := make(map[string]interface{}) if err = parsedJWT.UnsafeClaimsWithoutVerification(&allClaims); err != nil { return nil, errwrap.Wrapf("error parsing claims from wrapping token: {{err}}", err) } diff --git a/api/ssh_agent_test.go b/api/ssh_agent_test.go index 3ca5fdf2b7..d233b09c47 100644 --- a/api/ssh_agent_test.go +++ b/api/ssh_agent_test.go @@ -33,7 +33,7 @@ func TestSSH_CreateTLSClient(t *testing.T) { func TestSSH_CreateTLSClient_tlsServerName(t *testing.T) { // Ensure that the HTTP client is associated with the configured TLS server name. 
- var tlsServerName = "tls.server.name" + tlsServerName := "tls.server.name" config, err := ParseSSHHelperConfig(fmt.Sprintf(` vault_addr = "1.2.3.4" @@ -93,13 +93,12 @@ nope = "bad" } func TestParseSSHHelperConfig_tlsServerName(t *testing.T) { - var tlsServerName = "tls.server.name" + tlsServerName := "tls.server.name" config, err := ParseSSHHelperConfig(fmt.Sprintf(` vault_addr = "1.2.3.4" tls_server_name = "%s" `, tlsServerName)) - if err != nil { t.Fatal(err) } diff --git a/api/sys_audit.go b/api/sys_audit.go index 5fa6f3585d..d0c6408366 100644 --- a/api/sys_audit.go +++ b/api/sys_audit.go @@ -52,7 +52,6 @@ func (c *Sys) ListAudit() (map[string]*Audit, error) { ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) - if err != nil { return nil, err } @@ -94,7 +93,6 @@ func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) e ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) - if err != nil { return err } diff --git a/api/sys_auth.go b/api/sys_auth.go index e7a9c222d8..46abae4eff 100644 --- a/api/sys_auth.go +++ b/api/sys_auth.go @@ -74,7 +74,9 @@ func (c *Sys) DisableAuth(path string) error { } // Rather than duplicate, we can use modern Go's type aliasing -type EnableAuthOptions = MountInput -type AuthConfigInput = MountConfigInput -type AuthMount = MountOutput -type AuthConfigOutput = MountConfigOutput +type ( + EnableAuthOptions = MountInput + AuthConfigInput = MountConfigInput + AuthMount = MountOutput + AuthConfigOutput = MountConfigOutput +) diff --git a/api/sys_plugins.go b/api/sys_plugins.go index d90bcd0ab3..c17072d958 100644 --- a/api/sys_plugins.go +++ b/api/sys_plugins.go @@ -109,7 +109,6 @@ func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { for i, nameIfc := range pluginsIfc { name, ok := nameIfc.(string) if !ok { - } plugins[i] = name } @@ -323,7 +322,6 @@ func (c *Sys) ReloadPluginStatus(reloadStatusInput *ReloadPluginStatusInput) (*R return &r, nil } return nil, nil - } // catalogPathByType is a helper to construct the proper API path by plugin type diff --git a/audit/format_json_test.go b/audit/format_json_test.go index bfffe501b8..e2d8b3b086 100644 --- a/audit/format_json_test.go +++ b/audit/format_json_test.go @@ -4,14 +4,12 @@ import ( "bytes" "context" "encoding/json" + "errors" + "fmt" "strings" "testing" "time" - "errors" - - "fmt" - "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/salt" @@ -61,7 +59,7 @@ func TestFormatJSON_formatRequest(t *testing.T) { TTL: 60 * time.Second, }, Headers: map[string][]string{ - "foo": []string{"bar"}, + "foo": {"bar"}, }, }, errors.New("this is an error"), @@ -92,7 +90,7 @@ func TestFormatJSON_formatRequest(t *testing.T) { TTL: 60 * time.Second, }, Headers: map[string][]string{ - "foo": []string{"bar"}, + "foo": {"bar"}, }, }, errors.New("this is an error"), @@ -125,14 +123,14 @@ func TestFormatJSON_formatRequest(t *testing.T) { t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix) } - var expectedjson = new(AuditRequestEntry) + expectedjson := new(AuditRequestEntry) if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil { t.Fatalf("bad json: %s", err) } expectedjson.Request.Namespace = &AuditNamespace{ID: "root"} - var actualjson = new(AuditRequestEntry) + actualjson := new(AuditRequestEntry) 
if err := jsonutil.DecodeJSON([]byte(buf.String())[len(tc.Prefix):], &actualjson); err != nil { t.Fatalf("bad json: %s", err) } diff --git a/audit/format_jsonx_test.go b/audit/format_jsonx_test.go index dae1f9fcdf..6774f01fb6 100644 --- a/audit/format_jsonx_test.go +++ b/audit/format_jsonx_test.go @@ -3,14 +3,12 @@ package audit import ( "bytes" "context" + "errors" + "fmt" "strings" "testing" "time" - "errors" - - "fmt" - "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" @@ -63,7 +61,7 @@ func TestFormatJSONx_formatRequest(t *testing.T) { TTL: 60 * time.Second, }, Headers: map[string][]string{ - "foo": []string{"bar"}, + "foo": {"bar"}, }, PolicyOverride: true, }, @@ -100,7 +98,7 @@ func TestFormatJSONx_formatRequest(t *testing.T) { TTL: 60 * time.Second, }, Headers: map[string][]string{ - "foo": []string{"bar"}, + "foo": {"bar"}, }, PolicyOverride: true, }, diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go index 5cb1d9b602..0c130ee450 100644 --- a/builtin/audit/file/backend.go +++ b/builtin/audit/file/backend.go @@ -73,7 +73,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err } // Check if mode is provided - mode := os.FileMode(0600) + mode := os.FileMode(0o600) if modeRaw, ok := conf.Config["mode"]; ok { m, err := strconv.ParseUint(modeRaw, 8, 32) if err != nil { diff --git a/builtin/audit/file/backend_test.go b/builtin/audit/file/backend_test.go index 0410f9e0cd..702918d579 100644 --- a/builtin/audit/file/backend_test.go +++ b/builtin/audit/file/backend_test.go @@ -61,7 +61,7 @@ func TestAuditFile_fileModeExisting(t *testing.T) { } defer os.Remove(f.Name()) - err = os.Chmod(f.Name(), 0777) + err = os.Chmod(f.Name(), 0o777) if err != nil { t.Fatalf("Failure to chmod temp file for testing.") } @@ -88,7 +88,7 @@ func TestAuditFile_fileModeExisting(t *testing.T) { if err != nil { t.Fatalf("cannot retrieve file mode from `Stat`") } - if info.Mode() != os.FileMode(0600) { + if info.Mode() != os.FileMode(0o600) { t.Fatalf("File mode does not match.") } } @@ -126,7 +126,7 @@ func BenchmarkAuditFile_request(b *testing.B) { TTL: 60 * time.Second, }, Headers: map[string][]string{ - "foo": []string{"bar"}, + "foo": {"bar"}, }, }, } diff --git a/builtin/credential/app-id/backend.go b/builtin/credential/app-id/backend.go index b7b2147f1d..b77221d75b 100644 --- a/builtin/credential/app-id/backend.go +++ b/builtin/credential/app-id/backend.go @@ -26,12 +26,12 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { PathMap: framework.PathMap{ Name: "app-id", Schema: map[string]*framework.FieldSchema{ - "display_name": &framework.FieldSchema{ + "display_name": { Type: framework.TypeString, Description: "A name to map to this app ID for logs.", }, - "value": &framework.FieldSchema{ + "value": { Type: framework.TypeString, Description: "Policies for the app ID.", }, @@ -43,12 +43,12 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { b.MapUserId = &framework.PathMap{ Name: "user-id", Schema: map[string]*framework.FieldSchema{ - "cidr_block": &framework.FieldSchema{ + "cidr_block": { Type: framework.TypeString, Description: "If not blank, restricts auth by this CIDR block", }, - "value": &framework.FieldSchema{ + "value": { Type: framework.TypeString, Description: "App IDs that this user associates with.", }, diff --git a/builtin/credential/app-id/path_login.go b/builtin/credential/app-id/path_login.go index fc964787e5..800922451f 100644 --- 
a/builtin/credential/app-id/path_login.go +++ b/builtin/credential/app-id/path_login.go @@ -19,12 +19,12 @@ func pathLoginWithAppIDPath(b *backend) *framework.Path { return &framework.Path{ Pattern: "login/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "app_id": &framework.FieldSchema{ + "app_id": { Type: framework.TypeString, Description: "The unique app ID", }, - "user_id": &framework.FieldSchema{ + "user_id": { Type: framework.TypeString, Description: "The unique user ID", }, @@ -43,12 +43,12 @@ func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login$", Fields: map[string]*framework.FieldSchema{ - "app_id": &framework.FieldSchema{ + "app_id": { Type: framework.TypeString, Description: "The unique app ID", }, - "user_id": &framework.FieldSchema{ + "user_id": { Type: framework.TypeString, Description: "The unique user ID", }, diff --git a/builtin/credential/approle/path_login.go b/builtin/credential/approle/path_login.go index 45d9b14634..6f4c64873b 100644 --- a/builtin/credential/approle/path_login.go +++ b/builtin/credential/approle/path_login.go @@ -17,11 +17,11 @@ func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login$", Fields: map[string]*framework.FieldSchema{ - "role_id": &framework.FieldSchema{ + "role_id": { Type: framework.TypeString, Description: "Unique identifier of the Role. Required to be supplied when the 'bind_secret_id' constraint is set.", }, - "secret_id": &framework.FieldSchema{ + "secret_id": { Type: framework.TypeString, Default: "", Description: "SecretID belong to the App role", @@ -54,7 +54,6 @@ func (b *backend) pathLoginUpdateAliasLookahead(ctx context.Context, req *logica // Returns the Auth object indicating the authentication and authorization information // if the credentials provided are validated by the backend. func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - // RoleID must be supplied during every login roleID := strings.TrimSpace(data.Get("role_id").(string)) if roleID == "" { diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go index 75238102be..c39e9148b4 100644 --- a/builtin/credential/approle/path_role.go +++ b/builtin/credential/approle/path_role.go @@ -110,58 +110,58 @@ func rolePaths(b *backend) []*framework.Path { p := &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role_name"), Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "bind_secret_id": &framework.FieldSchema{ + "bind_secret_id": { Type: framework.TypeBool, Default: true, Description: "Impose secret_id to be presented when logging in using this role. Defaults to 'true'.", }, - "bound_cidr_list": &framework.FieldSchema{ + "bound_cidr_list": { Type: framework.TypeCommaStringSlice, Description: `Use "secret_id_bound_cidrs" instead.`, Deprecated: true, }, - "secret_id_bound_cidrs": &framework.FieldSchema{ + "secret_id_bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or list of CIDR blocks. 
If set, specifies the blocks of IP addresses which can perform the login operation.`, }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: tokenutil.DeprecationText("token_policies"), Deprecated: true, }, - "secret_id_num_uses": &framework.FieldSchema{ + "secret_id_num_uses": { Type: framework.TypeInt, Description: `Number of times a SecretID can access the role, after which the SecretID will expire. Defaults to 0 meaning that the the secret_id is of unlimited use.`, }, - "secret_id_ttl": &framework.FieldSchema{ + "secret_id_ttl": { Type: framework.TypeDurationSecond, Description: `Duration in seconds after which the issued SecretID should expire. Defaults to 0, meaning no expiration.`, }, - "period": &framework.FieldSchema{ + "period": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_period"), Deprecated: true, }, - "role_id": &framework.FieldSchema{ + "role_id": { Type: framework.TypeString, Description: "Identifier of the role. Defaults to a UUID.", }, - "local_secret_ids": &framework.FieldSchema{ + "local_secret_ids": { Type: framework.TypeBool, Description: `If set, the secret IDs generated using this role will be cluster local. This can only be set during role creation and once set, it can't be reset later.`, @@ -182,7 +182,7 @@ can only be set during role creation and once set, it can't be reset later.`, return []*framework.Path{ p, - &framework.Path{ + { Pattern: "role/?", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, @@ -190,10 +190,10 @@ can only be set during role creation and once set, it can't be reset later.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-list"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/local-secret-ids$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, @@ -204,19 +204,19 @@ can only be set during role creation and once set, it can't be reset later.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-local-secret-ids"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-local-secret-ids"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/policies$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: tokenutil.DeprecationText("token_policies"), Deprecated: true, }, - "token_policies": &framework.FieldSchema{ + "token_policies": { Type: framework.TypeCommaStringSlice, Description: defTokenFields["token_policies"].Description, }, @@ -229,14 +229,14 @@ can only be set during role creation and once set, it can't be reset later.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-policies"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-policies"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bound-cidr-list$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "bound_cidr_list": &framework.FieldSchema{ + "bound_cidr_list": { Type: framework.TypeCommaStringSlice, Description: 
`Deprecated: Please use "secret_id_bound_cidrs" instead. Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, @@ -250,14 +250,14 @@ of CIDR blocks. If set, specifies the blocks of IP addresses which can perform t HelpSynopsis: strings.TrimSpace(roleHelp["role-bound-cidr-list"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-bound-cidr-list"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-bound-cidrs$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "secret_id_bound_cidrs": &framework.FieldSchema{ + "secret_id_bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, @@ -271,14 +271,14 @@ IP addresses which can perform the login operation.`, HelpSynopsis: strings.TrimSpace(roleHelp["secret-id-bound-cidrs"][0]), HelpDescription: strings.TrimSpace(roleHelp["secret-id-bound-cidrs"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-bound-cidrs$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "token_bound_cidrs": &framework.FieldSchema{ + "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: defTokenFields["token_bound_cidrs"].Description, }, @@ -291,14 +291,14 @@ IP addresses which can perform the login operation.`, HelpSynopsis: strings.TrimSpace(roleHelp["token-bound-cidrs"][0]), HelpDescription: strings.TrimSpace(roleHelp["token-bound-cidrs"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bind-secret-id$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "bind_secret_id": &framework.FieldSchema{ + "bind_secret_id": { Type: framework.TypeBool, Default: true, Description: "Impose secret_id to be presented when logging in using this role.", @@ -312,14 +312,14 @@ IP addresses which can perform the login operation.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-bind-secret-id"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-bind-secret-id"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-num-uses$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "secret_id_num_uses": &framework.FieldSchema{ + "secret_id_num_uses": { Type: framework.TypeInt, Description: "Number of times a SecretID can access the role, after which the SecretID will expire.", }, @@ -332,14 +332,14 @@ IP addresses which can perform the login operation.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-ttl$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "secret_id_ttl": 
&framework.FieldSchema{ + "secret_id_ttl": { Type: framework.TypeDurationSecond, Description: `Duration in seconds after which the issued SecretID should expire. Defaults to 0, meaning no expiration.`, @@ -353,19 +353,19 @@ to 0, meaning no expiration.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-ttl"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-ttl"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/period$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "period": &framework.FieldSchema{ + "period": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_period"), Deprecated: true, }, - "token_period": &framework.FieldSchema{ + "token_period": { Type: framework.TypeDurationSecond, Description: defTokenFields["token_period"].Description, }, @@ -378,14 +378,14 @@ to 0, meaning no expiration.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-period"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-period"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-num-uses$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "token_num_uses": &framework.FieldSchema{ + "token_num_uses": { Type: framework.TypeInt, Description: defTokenFields["token_num_uses"].Description, }, @@ -398,14 +398,14 @@ to 0, meaning no expiration.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-token-num-uses"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-token-num-uses"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-ttl$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "token_ttl": &framework.FieldSchema{ + "token_ttl": { Type: framework.TypeDurationSecond, Description: defTokenFields["token_ttl"].Description, }, @@ -418,14 +418,14 @@ to 0, meaning no expiration.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-token-ttl"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-token-ttl"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-max-ttl$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "token_max_ttl": &framework.FieldSchema{ + "token_max_ttl": { Type: framework.TypeDurationSecond, Description: defTokenFields["token_max_ttl"].Description, }, @@ -438,14 +438,14 @@ to 0, meaning no expiration.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-token-max-ttl"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-token-max-ttl"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/role-id$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "role_id": &framework.FieldSchema{ + "role_id": { Type: framework.TypeString, Description: "Identifier of the role. 
Defaults to a UUID.", }, @@ -457,26 +457,26 @@ to 0, meaning no expiration.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-id"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-id"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/?$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "metadata": &framework.FieldSchema{ + "metadata": { Type: framework.TypeString, Description: `Metadata to be tied to the SecretID. This should be a JSON formatted string containing the metadata in key value pairs.`, }, - "cidr_list": &framework.FieldSchema{ + "cidr_list": { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or list of CIDR blocks enforcing secret IDs to be used from specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.`, }, - "token_bound_cidrs": &framework.FieldSchema{ + "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: defTokenFields["token_bound_cidrs"].Description, }, @@ -488,14 +488,14 @@ the role.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/lookup/?$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "secret_id": &framework.FieldSchema{ + "secret_id": { Type: framework.TypeString, Description: "SecretID attached to the role.", }, @@ -506,14 +506,14 @@ the role.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-lookup"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-lookup"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/destroy/?$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "secret_id": &framework.FieldSchema{ + "secret_id": { Type: framework.TypeString, Description: "SecretID attached to the role.", }, @@ -525,14 +525,14 @@ the role.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-destroy"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-destroy"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/lookup/?$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "secret_id_accessor": &framework.FieldSchema{ + "secret_id_accessor": { Type: framework.TypeString, Description: "Accessor of the SecretID", }, @@ -543,14 +543,14 @@ the role.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/destroy/?$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "secret_id_accessor": &framework.FieldSchema{ + "secret_id_accessor": { Type: 
framework.TypeString, Description: "Accessor of the SecretID", }, @@ -562,30 +562,30 @@ the role.`, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]), HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]), }, - &framework.Path{ + { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/custom-secret-id$", Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role.", }, - "secret_id": &framework.FieldSchema{ + "secret_id": { Type: framework.TypeString, Description: "SecretID to be attached to the role.", }, - "metadata": &framework.FieldSchema{ + "metadata": { Type: framework.TypeString, Description: `Metadata to be tied to the SecretID. This should be a JSON formatted string containing metadata in key value pairs.`, }, - "cidr_list": &framework.FieldSchema{ + "cidr_list": { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or list of CIDR blocks enforcing secret IDs to be used from specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.`, }, - "token_bound_cidrs": &framework.FieldSchema{ + "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. Should be a subset of the token CIDR blocks listed on the role, if any.`, @@ -2513,11 +2513,13 @@ that are generated against the role using 'role//secret-id' or "role-secret-id-lookup": { "Read the properties of an issued secret_id", `This endpoint is used to read the properties of a secret_id associated to a -role.`}, +role.`, + }, "role-secret-id-destroy": { "Invalidate an issued secret_id", `This endpoint is used to delete the properties of a secret_id associated to a -role.`}, +role.`, + }, "role-secret-id-accessor-lookup": { "Read an issued secret_id, using its accessor", `This is particularly useful to lookup the non-expiring 'secret_id's. diff --git a/builtin/credential/approle/path_role_test.go b/builtin/credential/approle/path_role_test.go index 5cc0bfb1fe..215d15c57c 100644 --- a/builtin/credential/approle/path_role_test.go +++ b/builtin/credential/approle/path_role_test.go @@ -1890,7 +1890,6 @@ func TestAppRole_TokenutilUpgrade(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Construct the storage entry object based on our test case. tokenTypeKV := "" if !tt.storageValMissing { diff --git a/builtin/credential/approle/path_tidy_user_id.go b/builtin/credential/approle/path_tidy_user_id.go index 49a3e3c6a4..ac281bbb31 100644 --- a/builtin/credential/approle/path_tidy_user_id.go +++ b/builtin/credential/approle/path_tidy_user_id.go @@ -45,7 +45,6 @@ func (b *backend) tidySecretID(ctx context.Context, req *logical.Request) (*logi resp := &logical.Response{} resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.") return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) - } type tidyHelperSecretIDAccessor struct { @@ -197,7 +196,7 @@ func (b *backend) tidySecretIDinternal(s logical.Storage) { // roles without having a lock while doing so. Because // accHashesByLockID was populated previously, at worst this may // mean that we fail to clean up something we ought to. 
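A minimal, self-contained sketch of the composite-literal simplification applied in the hunks above: once a map literal names its value type, each entry no longer needs the explicit "&framework.FieldSchema{...}" prefix. The struct and field names below are stand-ins for illustration only, not the real SDK types.

package main

import "fmt"

// fieldSchema stands in for framework.FieldSchema; illustrative only.
type fieldSchema struct {
	Type        string
	Description string
}

func main() {
	// Because the map's value type is already *fieldSchema, each entry can be
	// written as a bare composite literal; the redundant &fieldSchema prefix
	// shown on the removed lines above is dropped.
	fields := map[string]*fieldSchema{
		"role_name": {
			Type:        "string",
			Description: "Name of the role.",
		},
	}
	fmt.Println(fields["role_name"].Description)
}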
- var allSecretIDHMACs = make(map[string]struct{}) + allSecretIDHMACs := make(map[string]struct{}) for _, roleNameHMAC := range roleNameHMACs { secretIDHMACs, err := s.List(ctx, secretIDPrefixToUse+roleNameHMAC) if err != nil { @@ -265,7 +264,9 @@ func (b *backend) pathTidySecretIDUpdate(ctx context.Context, req *logical.Reque return b.tidySecretID(ctx, req) } -const pathTidySecretIDSyn = "Trigger the clean-up of expired SecretID entries." -const pathTidySecretIDDesc = `SecretIDs will have expiration time attached to them. The periodic function +const ( + pathTidySecretIDSyn = "Trigger the clean-up of expired SecretID entries." + pathTidySecretIDDesc = `SecretIDs will have expiration time attached to them. The periodic function of the backend will look for expired entries and delete them. This happens once in a minute. Invoking this endpoint will trigger the clean-up action, without waiting for the backend's periodic function.` +) diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go index 5a555ddac3..354fcb24be 100644 --- a/builtin/credential/aws/backend.go +++ b/builtin/credential/aws/backend.go @@ -26,7 +26,8 @@ var defaultAllowedSTSRequestHeaders = []string{ "X-Amz-Date", "X-Amz-Security-Token", "X-Amz-Signature", - "X-Amz-SignedHeaders"} + "X-Amz-SignedHeaders", +} func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b, err := Backend(conf) diff --git a/builtin/credential/aws/backend_e2e_test.go b/builtin/credential/aws/backend_e2e_test.go index a252d27dc6..ac2bb22f12 100644 --- a/builtin/credential/aws/backend_e2e_test.go +++ b/builtin/credential/aws/backend_e2e_test.go @@ -14,7 +14,6 @@ import ( ) func TestBackend_E2E_Initialize(t *testing.T) { - ctx := context.Background() // Set up the cluster. 
This will trigger an Initialize(); we sleep briefly @@ -62,7 +61,8 @@ func TestBackend_E2E_Initialize(t *testing.T) { data := map[string]interface{}{ "auth_type": "ec2", "policies": "default", - "bound_subnet_id": "subnet-abcdef"} + "bound_subnet_id": "subnet-abcdef", + } if _, err := core.Client.Logical().Write("auth/aws/role/test-role", data); err != nil { t.Fatal(err) } @@ -100,7 +100,6 @@ func TestBackend_E2E_Initialize(t *testing.T) { } func setupAwsTestCluster(t *testing.T, _ context.Context) *vault.TestCluster { - // create a cluster with the aws auth backend built-in logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ diff --git a/builtin/credential/aws/backend_test.go b/builtin/credential/aws/backend_test.go index 4ea90b7b17..5dc7e609ec 100644 --- a/builtin/credential/aws/backend_test.go +++ b/builtin/credential/aws/backend_test.go @@ -20,9 +20,11 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const testVaultHeaderValue = "VaultAcceptanceTesting" -const testValidRoleName = "valid-role" -const testInvalidRoleName = "invalid-role" +const ( + testVaultHeaderValue = "VaultAcceptanceTesting" + testValidRoleName = "valid-role" + testInvalidRoleName = "invalid-role" +) func TestBackend_CreateParseVerifyRoleTag(t *testing.T) { // create a backend @@ -479,7 +481,8 @@ func TestBackend_ConfigClient(t *testing.T) { t.Fatal(err) } - data := map[string]interface{}{"access_key": "AKIAJBRHKV6EVTTNXDHA", + data := map[string]interface{}{ + "access_key": "AKIAJBRHKV6EVTTNXDHA", "secret_key": "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", } @@ -495,7 +498,8 @@ func TestBackend_ConfigClient(t *testing.T) { Data: data, } - data3 := map[string]interface{}{"access_key": "", + data3 := map[string]interface{}{ + "access_key": "", "secret_key": "mCtSM8ZUEQ3mOFVZYPBQkf2sO6F/W7a5TVzrl3Oj", } stepInvalidAccessKey := logicaltest.TestStep{ @@ -505,7 +509,8 @@ func TestBackend_ConfigClient(t *testing.T) { ErrorOk: true, } - data4 := map[string]interface{}{"access_key": "accesskey", + data4 := map[string]interface{}{ + "access_key": "accesskey", "secret_key": "", } stepInvalidSecretKey := logicaltest.TestStep{ @@ -554,7 +559,7 @@ func TestBackend_ConfigClient(t *testing.T) { t.Fatal(err) } - //test existence check returning true + // test existence check returning true checkFound, exists, err = b.HandleExistenceCheck(context.Background(), &logical.Request{ Operation: logical.CreateOperation, Path: "config/client", @@ -907,7 +912,6 @@ func TestBackend_PathRoleTag(t *testing.T) { } func TestBackend_PathBlacklistRoleTag(t *testing.T) { - for _, path := range []string{"roletag-blacklist/", "roletag-denylist/"} { // create the backend storage := &logical.InmemStorage{} @@ -1483,7 +1487,8 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { // potentially pick up credentials from the ~/.config files), but probably // good enough rather than having to muck around in the low-level details for _, envvar := range []string{ - "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SECURITY_TOKEN", "AWS_SESSION_TOKEN"} { + "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SECURITY_TOKEN", "AWS_SESSION_TOKEN", + } { // Skip test if any of the required env vars are missing testEnvVar := os.Getenv("TEST_" + envvar) if testEnvVar == "" { diff --git a/builtin/credential/aws/cli.go b/builtin/credential/aws/cli.go index cec44f6375..98d959953a 100644 --- a/builtin/credential/aws/cli.go +++ b/builtin/credential/aws/cli.go @@ -128,7 +128,6 @@ func (h *CLIHandler) Auth(c *api.Client, m 
map[string]string) (*api.Secret, erro loginData["role"] = role path := fmt.Sprintf("auth/%s/login", mount) secret, err := c.Logical().Write(path, loginData) - if err != nil { return nil, err } diff --git a/builtin/credential/aws/client.go b/builtin/credential/aws/client.go index 0753c95308..40c71fddde 100644 --- a/builtin/credential/aws/client.go +++ b/builtin/credential/aws/client.go @@ -83,7 +83,6 @@ func (b *backend) getRawClientConfig(ctx context.Context, s logical.Storage, reg // stsRole is a non-empty string, it will use AssumeRole to obtain a set of assumed // credentials. The credentials will expire after 15 minutes but will auto-refresh. func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region, stsRole, accountID, clientType string) (*aws.Config, error) { - config, err := b.getRawClientConfig(ctx, s, region, clientType) if err != nil { return nil, err @@ -144,7 +143,7 @@ func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region // acquired for write operation before calling this method. func (b *backend) flushCachedEC2Clients() { // deleting items in map during iteration is safe - for region, _ := range b.EC2ClientsMap { + for region := range b.EC2ClientsMap { delete(b.EC2ClientsMap, region) } } @@ -155,7 +154,7 @@ func (b *backend) flushCachedEC2Clients() { // lock should be acquired for write operation before calling this method. func (b *backend) flushCachedIAMClients() { // deleting items in map during iteration is safe - for region, _ := range b.IAMClientsMap { + for region := range b.IAMClientsMap { delete(b.IAMClientsMap, region) } } diff --git a/builtin/credential/aws/path_config_certificate.go b/builtin/credential/aws/path_config_certificate.go index 326ac6c779..cd4250b56e 100644 --- a/builtin/credential/aws/path_config_certificate.go +++ b/builtin/credential/aws/path_config_certificate.go @@ -442,9 +442,11 @@ corresponding regions should be registered using this endpoint. PKCS#7 is verifi using a collection of certificates containing the default certificate and all the certificates that are registered using this endpoint. ` + const pathListCertificatesHelpSyn = ` Lists all the AWS public certificates that are registered with the backend. ` + const pathListCertificatesHelpDesc = ` Certificates will be listed by their respective names that were used during registration. 
` diff --git a/builtin/credential/aws/path_config_identity.go b/builtin/credential/aws/path_config_identity.go index 581aa0ac91..76e0b302ba 100644 --- a/builtin/credential/aws/path_config_identity.go +++ b/builtin/credential/aws/path_config_identity.go @@ -178,11 +178,13 @@ type identityConfig struct { EC2AuthMetadataHandler *authmetadata.Handler `json:"ec2_auth_metadata_handler"` } -const identityAliasIAMUniqueID = "unique_id" -const identityAliasIAMFullArn = "full_arn" -const identityAliasEC2InstanceID = "instance_id" -const identityAliasEC2ImageID = "image_id" -const identityAliasRoleID = "role_id" +const ( + identityAliasIAMUniqueID = "unique_id" + identityAliasIAMFullArn = "full_arn" + identityAliasEC2InstanceID = "instance_id" + identityAliasEC2ImageID = "image_id" + identityAliasRoleID = "role_id" +) const pathConfigIdentityHelpSyn = ` Configure the way the AWS auth method interacts with the identity store diff --git a/builtin/credential/aws/path_config_sts.go b/builtin/credential/aws/path_config_sts.go index 066f1cb59a..3666a90041 100644 --- a/builtin/credential/aws/path_config_sts.go +++ b/builtin/credential/aws/path_config_sts.go @@ -250,6 +250,7 @@ by assumption of these STS roles. The environment in which the Vault server resides must have access to assume the given STS roles. ` + const pathListStsHelpSyn = ` List all the AWS account/STS role relationships registered with Vault. ` diff --git a/builtin/credential/aws/path_config_tidy_identity_accesslist.go b/builtin/credential/aws/path_config_tidy_identity_accesslist.go index f09586c29c..f89c5ab215 100644 --- a/builtin/credential/aws/path_config_tidy_identity_accesslist.go +++ b/builtin/credential/aws/path_config_tidy_identity_accesslist.go @@ -18,7 +18,7 @@ func (b *backend) pathConfigTidyIdentityAccessList() *framework.Path { Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, - Default: 259200, //72h + Default: 259200, // 72h Description: `The amount of extra time that must have passed beyond the identity's expiration, before it is removed from the backend storage.`, }, @@ -152,6 +152,7 @@ type tidyWhitelistIdentityConfig struct { const pathConfigTidyIdentityAccessListHelpSyn = ` Configures the periodic tidying operation of the access list identity entries. ` + const pathConfigTidyIdentityAccessListHelpDesc = ` By default, the expired entries in the access list will be attempted to be removed periodically. This operation will look for expired items in the list and purges them. diff --git a/builtin/credential/aws/path_config_tidy_roletag_denylist.go b/builtin/credential/aws/path_config_tidy_roletag_denylist.go index 66e5168e7b..e00404d7ec 100644 --- a/builtin/credential/aws/path_config_tidy_roletag_denylist.go +++ b/builtin/credential/aws/path_config_tidy_roletag_denylist.go @@ -17,7 +17,7 @@ func (b *backend) pathConfigTidyRoletagDenyList() *framework.Path { Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, - Default: 15552000, //180d + Default: 15552000, // 180d Description: `The amount of extra time that must have passed beyond the roletag expiration, before it is removed from the backend storage. Defaults to 4320h (180 days).`, @@ -152,6 +152,7 @@ type tidyDenyListRoleTagConfig struct { const pathConfigTidyRoletagDenyListHelpSyn = ` Configures the periodic tidying operation of the deny listed role tag entries. 
` + const pathConfigTidyRoletagDenyListHelpDesc = ` By default, the expired entries in the deny list will be attempted to be removed periodically. This operation will look for expired items in the list and purges them. diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index 499c734aeb..6c64c6446c 100644 --- a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -193,7 +193,6 @@ func (b *backend) validateInstance(ctx context.Context, s logical.Storage, insta } if len(status.Reservations) == 0 { return nil, fmt.Errorf("no reservations found in instance description") - } if len(status.Reservations[0].Instances) == 0 { return nil, fmt.Errorf("no instance details found in reservations") @@ -511,7 +510,6 @@ func (b *backend) verifyInstanceMeetsRoleRequirements(ctx context.Context, // Extract out the instance profile name from the instance // profile ARN iamInstanceProfileEntity, err := parseIamArn(iamInstanceProfileARN) - if err != nil { return nil, errwrap.Wrapf(fmt.Sprintf("failed to parse IAM instance profile ARN %q: {{err}}", iamInstanceProfileARN), err) } diff --git a/builtin/credential/aws/path_role.go b/builtin/credential/aws/path_role.go index 129d6124a1..9b9af0f9fc 100644 --- a/builtin/credential/aws/path_role.go +++ b/builtin/credential/aws/path_role.go @@ -16,9 +16,7 @@ import ( "github.com/mitchellh/copystructure" ) -var ( - currentRoleStorageVersion = 3 -) +var currentRoleStorageVersion = 3 func (b *backend) pathRole() *framework.Path { p := &framework.Path{ @@ -333,7 +331,6 @@ func (b *backend) setRole(ctx context.Context, s logical.Storage, roleName strin // initialize is used to initialize the AWS roles func (b *backend) initialize(ctx context.Context, req *logical.InitializationRequest) error { - // on standbys and DR secondaries we do not want to run any kind of upgrade logic if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby | consts.ReplicationDRSecondary) { return nil diff --git a/builtin/credential/aws/path_role_test.go b/builtin/credential/aws/path_role_test.go index ee721fcf89..a46a28a8a8 100644 --- a/builtin/credential/aws/path_role_test.go +++ b/builtin/credential/aws/path_role_test.go @@ -528,7 +528,6 @@ func TestBackend_pathRoleMixedTypes(t *testing.T) { if !resp.IsError() { t.Fatalf("allowed changing resolve_aws_unique_ids from true to false") } - } func TestAwsEc2_RoleCrud(t *testing.T) { @@ -815,7 +814,6 @@ func TestRoleEntryUpgradeV(t *testing.T) { } func TestRoleInitialize(t *testing.T) { - config := logical.TestBackendConfig() storage := &logical.InmemStorage{} config.StorageView = storage @@ -970,7 +968,6 @@ func TestRoleInitialize(t *testing.T) { } func TestAwsVersion(t *testing.T) { - before := awsVersion{ Version: 42, } diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go index 8e2db8dfc3..59ef14c8a7 100644 --- a/builtin/credential/cert/backend_test.go +++ b/builtin/credential/cert/backend_test.go @@ -5,31 +5,30 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" "encoding/pem" + "fmt" + "io" + "io/ioutil" + "math/big" mathrand "math/rand" + "net" "net/http" "net/url" + "os" "path/filepath" + "reflect" + "testing" + "time" "github.com/go-test/deep" "github.com/hashicorp/go-sockaddr" "golang.org/x/net/http2" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "fmt" - "io" - "io/ioutil" - "math/big" - "net" - "os" - "reflect" - 
"testing" - "time" - cleanhttp "github.com/hashicorp/go-cleanhttp" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" @@ -98,7 +97,7 @@ func generateTestCertAndConnState(t *testing.T, template *x509.Certificate) (str Type: "CERTIFICATE", Bytes: caBytes, } - err = ioutil.WriteFile(filepath.Join(tempDir, "ca_cert.pem"), pem.EncodeToMemory(caCertPEMBlock), 0755) + err = ioutil.WriteFile(filepath.Join(tempDir, "ca_cert.pem"), pem.EncodeToMemory(caCertPEMBlock), 0o755) if err != nil { t.Fatal(err) } @@ -110,7 +109,7 @@ func generateTestCertAndConnState(t *testing.T, template *x509.Certificate) (str Type: "EC PRIVATE KEY", Bytes: marshaledCAKey, } - err = ioutil.WriteFile(filepath.Join(tempDir, "ca_key.pem"), pem.EncodeToMemory(caKeyPEMBlock), 0755) + err = ioutil.WriteFile(filepath.Join(tempDir, "ca_key.pem"), pem.EncodeToMemory(caKeyPEMBlock), 0o755) if err != nil { t.Fatal(err) } @@ -127,7 +126,7 @@ func generateTestCertAndConnState(t *testing.T, template *x509.Certificate) (str Type: "CERTIFICATE", Bytes: certBytes, } - err = ioutil.WriteFile(filepath.Join(tempDir, "cert.pem"), pem.EncodeToMemory(certPEMBlock), 0755) + err = ioutil.WriteFile(filepath.Join(tempDir, "cert.pem"), pem.EncodeToMemory(certPEMBlock), 0o755) if err != nil { t.Fatal(err) } @@ -139,7 +138,7 @@ func generateTestCertAndConnState(t *testing.T, template *x509.Certificate) (str Type: "EC PRIVATE KEY", Bytes: marshaledKey, } - err = ioutil.WriteFile(filepath.Join(tempDir, "key.pem"), pem.EncodeToMemory(keyPEMBlock), 0755) + err = ioutil.WriteFile(filepath.Join(tempDir, "key.pem"), pem.EncodeToMemory(keyPEMBlock), 0o755) if err != nil { t.Fatal(err) } @@ -1580,7 +1579,7 @@ func testAccStepLoginWithNameInvalid(t *testing.T, connState tls.ConnectionState func testAccStepListCerts( t *testing.T, certs []string) []logicaltest.TestStep { return []logicaltest.TestStep{ - logicaltest.TestStep{ + { Operation: logical.ListOperation, Path: "certs", Check: func(resp *logical.Response) error { @@ -1599,7 +1598,7 @@ func testAccStepListCerts( } return nil }, - }, logicaltest.TestStep{ + }, { Operation: logical.ListOperation, Path: "certs/", Check: func(resp *logical.Response) error { @@ -1973,7 +1972,7 @@ func TestBackend_CertUpgrade(t *testing.T) { Period: time.Second, TTL: time.Second, MaxTTL: time.Second, - BoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, } entry, err := logical.StorageEntryJSON("cert/foo", foo) @@ -1995,13 +1994,13 @@ func TestBackend_CertUpgrade(t *testing.T) { Period: time.Second, TTL: time.Second, MaxTTL: time.Second, - BoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, TokenParams: tokenutil.TokenParams{ TokenPolicies: []string{"foo"}, TokenPeriod: time.Second, TokenTTL: time.Second, TokenMaxTTL: time.Second, - TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, }, } if diff := deep.Equal(certEntry, exp); diff != nil { diff --git a/builtin/credential/cert/path_certs.go b/builtin/credential/cert/path_certs.go index 7c5543db45..ca2258423a 100644 --- a/builtin/credential/cert/path_certs.go +++ 
b/builtin/credential/cert/path_certs.go @@ -34,12 +34,12 @@ func pathCerts(b *backend) *framework.Path { p := &framework.Path{ Pattern: "certs/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The name of the certificate", }, - "certificate": &framework.FieldSchema{ + "certificate": { Type: framework.TypeString, Description: `The public certificate that should be trusted. Must be x509 PEM encoded.`, @@ -48,7 +48,7 @@ Must be x509 PEM encoded.`, }, }, - "allowed_names": &framework.FieldSchema{ + "allowed_names": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of names. At least one must exist in either the Common Name or SANs. Supports globbing. @@ -59,7 +59,7 @@ allowed_email_sans, allowed_uri_sans.`, }, }, - "allowed_common_names": &framework.FieldSchema{ + "allowed_common_names": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of names. At least one must exist in the Common Name. Supports globbing.`, @@ -68,7 +68,7 @@ At least one must exist in the Common Name. Supports globbing.`, }, }, - "allowed_dns_sans": &framework.FieldSchema{ + "allowed_dns_sans": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of DNS names. At least one must exist in the SANs. Supports globbing.`, @@ -78,7 +78,7 @@ At least one must exist in the SANs. Supports globbing.`, }, }, - "allowed_email_sans": &framework.FieldSchema{ + "allowed_email_sans": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of Email Addresses. At least one must exist in the SANs. Supports globbing.`, @@ -88,7 +88,7 @@ At least one must exist in the SANs. Supports globbing.`, }, }, - "allowed_uri_sans": &framework.FieldSchema{ + "allowed_uri_sans": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of URIs. At least one must exist in the SANs. Supports globbing.`, @@ -98,7 +98,7 @@ At least one must exist in the SANs. Supports globbing.`, }, }, - "allowed_organizational_units": &framework.FieldSchema{ + "allowed_organizational_units": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of Organizational Units names. At least one must exist in the OU field.`, @@ -107,50 +107,50 @@ At least one must exist in the OU field.`, }, }, - "required_extensions": &framework.FieldSchema{ + "required_extensions": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated string or array of extensions formatted as "oid:value". Expects the extension value to be some type of ASN1 encoded string. All values much match. 
Supports globbing on "value".`, }, - "display_name": &framework.FieldSchema{ + "display_name": { Type: framework.TypeString, Description: `The display name to use for clients using this certificate.`, }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: tokenutil.DeprecationText("token_policies"), Deprecated: true, }, - "lease": &framework.FieldSchema{ + "lease": { Type: framework.TypeInt, Description: tokenutil.DeprecationText("token_ttl"), Deprecated: true, }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_ttl"), Deprecated: true, }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_max_ttl"), Deprecated: true, }, - "period": &framework.FieldSchema{ + "period": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_period"), Deprecated: true, }, - "bound_cidrs": &framework.FieldSchema{ + "bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: tokenutil.DeprecationText("token_bound_cidrs"), Deprecated: true, diff --git a/builtin/credential/cert/path_config.go b/builtin/credential/cert/path_config.go index 67ee61d2e5..a55b6c0738 100644 --- a/builtin/credential/cert/path_config.go +++ b/builtin/credential/cert/path_config.go @@ -12,7 +12,7 @@ func pathConfig(b *backend) *framework.Path { return &framework.Path{ Pattern: "config", Fields: map[string]*framework.FieldSchema{ - "disable_binding": &framework.FieldSchema{ + "disable_binding": { Type: framework.TypeBool, Default: false, Description: `If set, during renewal, skips the matching of presented client identity with the client identity used during login. Defaults to false.`, diff --git a/builtin/credential/cert/path_crls.go b/builtin/credential/cert/path_crls.go index 0f963ab91c..92a6442022 100644 --- a/builtin/credential/cert/path_crls.go +++ b/builtin/credential/cert/path_crls.go @@ -18,12 +18,12 @@ func pathCRLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "crls/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The name of the certificate", }, - "crl": &framework.FieldSchema{ + "crl": { Type: framework.TypeString, Description: `The public certificate that should be trusted. May be DER or PEM encoded. Note: the expiration time @@ -230,8 +230,7 @@ type CRLInfo struct { Serials map[string]RevokedSerialInfo `json:"serials" structs:"serials" mapstructure:"serials"` } -type RevokedSerialInfo struct { -} +type RevokedSerialInfo struct{} const pathCRLsHelpSyn = ` Manage Certificate Revocation Lists checked during authentication. 
diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go index e0a4b5005f..d9f5bb5e70 100644 --- a/builtin/credential/cert/path_login.go +++ b/builtin/credential/cert/path_login.go @@ -31,7 +31,7 @@ func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The name of the certificate role to authenticate against.", }, diff --git a/builtin/credential/github/path_config.go b/builtin/credential/github/path_config.go index 3822936ac7..3e057602ee 100644 --- a/builtin/credential/github/path_config.go +++ b/builtin/credential/github/path_config.go @@ -17,12 +17,12 @@ func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: "config", Fields: map[string]*framework.FieldSchema{ - "organization": &framework.FieldSchema{ + "organization": { Type: framework.TypeString, Description: "The organization users must be part of", }, - "base_url": &framework.FieldSchema{ + "base_url": { Type: framework.TypeString, Description: `The API endpoint to use. Useful if you are running GitHub Enterprise or an @@ -32,12 +32,12 @@ API-compatible authentication server.`, Group: "GitHub Options", }, }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_ttl"), Deprecated: true, }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_max_ttl"), Deprecated: true, diff --git a/builtin/credential/github/path_login.go b/builtin/credential/github/path_login.go index edebb9a7b3..14b5b0d04d 100644 --- a/builtin/credential/github/path_login.go +++ b/builtin/credential/github/path_login.go @@ -18,7 +18,7 @@ func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "GitHub personal API token", }, @@ -248,13 +248,11 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, t } groupPoliciesList, err := b.TeamMap.Policies(ctx, req.Storage, teamNames...) - if err != nil { return nil, nil, err } userPoliciesList, err := b.UserMap.Policies(ctx, req.Storage, []string{*user.Login}...) 
- if err != nil { return nil, nil, err } diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index b25864acd7..12ce5d4c53 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -61,7 +61,6 @@ type backend struct { } func (b *backend) Login(ctx context.Context, req *logical.Request, username string, password string) ([]string, *logical.Response, []string, error) { - cfg, err := b.Config(ctx, req) if err != nil { return nil, nil, nil, err diff --git a/builtin/credential/ldap/backend_test.go b/builtin/credential/ldap/backend_test.go index 9142d0142d..415e7edf9f 100644 --- a/builtin/credential/ldap/backend_test.go +++ b/builtin/credential/ldap/backend_test.go @@ -617,12 +617,12 @@ func TestBackend_configDefaultsAfterUpdate(t *testing.T) { logicaltest.Test(t, logicaltest.TestCase{ CredentialBackend: b, Steps: []logicaltest.TestStep{ - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "config", Data: map[string]interface{}{}, }, - logicaltest.TestStep{ + { Operation: logical.ReadOperation, Path: "config", Check: func(resp *logical.Response) error { @@ -1032,5 +1032,4 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { if diff := deep.Equal(exp, configEntry); diff != nil { t.Fatal(diff) } - } diff --git a/builtin/credential/ldap/path_login.go b/builtin/credential/ldap/path_login.go index 18123323e0..41d66d04e3 100644 --- a/builtin/credential/ldap/path_login.go +++ b/builtin/credential/ldap/path_login.go @@ -14,12 +14,12 @@ func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: `login/(?P.+)`, Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "DN (distinguished name) to be used for login.", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password for this user.", }, diff --git a/builtin/credential/okta/backend_test.go b/builtin/credential/okta/backend_test.go index 6e7db1440c..ee1588ee25 100644 --- a/builtin/credential/okta/backend_test.go +++ b/builtin/credential/okta/backend_test.go @@ -171,7 +171,6 @@ func testLoginWrite(t *testing.T, username, password, reason string, expectedTTL } } else if reason != "" { return fmt.Errorf("expected error containing %q, got no error", reason) - } if resp.Auth != nil { diff --git a/builtin/credential/okta/path_config.go b/builtin/credential/okta/path_config.go index 12c1a0c030..7fc93efb87 100644 --- a/builtin/credential/okta/path_config.go +++ b/builtin/credential/okta/path_config.go @@ -25,53 +25,53 @@ func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: `config`, Fields: map[string]*framework.FieldSchema{ - "organization": &framework.FieldSchema{ + "organization": { Type: framework.TypeString, Description: "Use org_name instead.", Deprecated: true, }, - "org_name": &framework.FieldSchema{ + "org_name": { Type: framework.TypeString, Description: "Name of the organization to be used in the Okta API.", DisplayAttrs: &framework.DisplayAttributes{ Name: "Organization Name", }, }, - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Use api_token instead.", Deprecated: true, }, - "api_token": &framework.FieldSchema{ + "api_token": { Type: framework.TypeString, Description: "Okta API key.", DisplayAttrs: &framework.DisplayAttributes{ Name: "API Token", }, }, - "base_url": &framework.FieldSchema{ + "base_url": { Type: framework.TypeString, Description: 
`The base domain to use for the Okta API. When not specified in the configuration, "okta.com" is used.`, DisplayAttrs: &framework.DisplayAttributes{ Name: "Base URL", }, }, - "production": &framework.FieldSchema{ + "production": { Type: framework.TypeBool, Description: `Use base_url instead.`, Deprecated: true, }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_ttl"), Deprecated: true, }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_max_ttl"), Deprecated: true, }, - "bypass_okta_mfa": &framework.FieldSchema{ + "bypass_okta_mfa": { Type: framework.TypeBool, Description: `When set true, requests by Okta for a MFA check will be bypassed. This also disallows certain status checks on the account, such as whether the password is expired.`, DisplayAttrs: &framework.DisplayAttributes{ diff --git a/builtin/credential/okta/path_groups.go b/builtin/credential/okta/path_groups.go index 9ba36b282a..f9ff0225ac 100644 --- a/builtin/credential/okta/path_groups.go +++ b/builtin/credential/okta/path_groups.go @@ -68,7 +68,6 @@ func (b *backend) Group(ctx context.Context, s logical.Storage, n string) (*Grou entries, err := groupList(ctx, s) if err != nil { return nil, "", err - } for _, groupName := range entries { diff --git a/builtin/credential/okta/path_login.go b/builtin/credential/okta/path_login.go index f21d9a6fdd..768846a27b 100644 --- a/builtin/credential/okta/path_login.go +++ b/builtin/credential/okta/path_login.go @@ -152,11 +152,9 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f } return resp, nil - } func (b *backend) getConfig(ctx context.Context, req *logical.Request) (*ConfigEntry, error) { - cfg, err := b.Config(ctx, req.Storage) if err != nil { return nil, err diff --git a/builtin/credential/okta/path_users.go b/builtin/credential/okta/path_users.go index d3408ee42f..bd5fdc0ebb 100644 --- a/builtin/credential/okta/path_users.go +++ b/builtin/credential/okta/path_users.go @@ -28,17 +28,17 @@ func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the user.", }, - "groups": &framework.FieldSchema{ + "groups": { Type: framework.TypeCommaStringSlice, Description: "List of groups associated with the user.", }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: "List of policies associated with the user.", }, diff --git a/builtin/credential/radius/backend_test.go b/builtin/credential/radius/backend_test.go index 1ded629b6c..da0e930de3 100644 --- a/builtin/credential/radius/backend_test.go +++ b/builtin/credential/radius/backend_test.go @@ -296,7 +296,7 @@ func testAccUserLoginPolicy(t *testing.T, user string, data map[string]interface Data: data, ErrorOk: expectError, Unauthenticated: true, - //Check: logicaltest.TestCheckAuth(policies), + // Check: logicaltest.TestCheckAuth(policies), Check: func(resp *logical.Response) error { res := logicaltest.TestCheckAuth(policies)(resp) if res != nil && expectError { diff --git a/builtin/credential/radius/path_config.go b/builtin/credential/radius/path_config.go index aa0e730486..33d4d0d991 100644 --- a/builtin/credential/radius/path_config.go +++ b/builtin/credential/radius/path_config.go @@ -13,14 +13,14 @@ func pathConfig(b 
*backend) *framework.Path { p := &framework.Path{ Pattern: "config", Fields: map[string]*framework.FieldSchema{ - "host": &framework.FieldSchema{ + "host": { Type: framework.TypeString, Description: "RADIUS server host", DisplayAttrs: &framework.DisplayAttributes{ Name: "Host", }, }, - "port": &framework.FieldSchema{ + "port": { Type: framework.TypeInt, Default: 1812, Description: "RADIUS server port (default: 1812)", @@ -28,11 +28,11 @@ func pathConfig(b *backend) *framework.Path { Value: 1812, }, }, - "secret": &framework.FieldSchema{ + "secret": { Type: framework.TypeString, Description: "Secret shared with the RADIUS server", }, - "unregistered_user_policies": &framework.FieldSchema{ + "unregistered_user_policies": { Type: framework.TypeString, Default: "", Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregisted user (default: empty)", @@ -40,7 +40,7 @@ func pathConfig(b *backend) *framework.Path { Name: "Policies for unregistered users", }, }, - "dial_timeout": &framework.FieldSchema{ + "dial_timeout": { Type: framework.TypeDurationSecond, Default: 10, Description: "Number of seconds before connect times out (default: 10)", @@ -48,7 +48,7 @@ func pathConfig(b *backend) *framework.Path { Value: 10, }, }, - "read_timeout": &framework.FieldSchema{ + "read_timeout": { Type: framework.TypeDurationSecond, Default: 10, Description: "Number of seconds before response times out (default: 10)", @@ -56,7 +56,7 @@ func pathConfig(b *backend) *framework.Path { Value: 10, }, }, - "nas_port": &framework.FieldSchema{ + "nas_port": { Type: framework.TypeInt, Default: 10, Description: "RADIUS NAS port field (default: 10)", @@ -65,7 +65,7 @@ func pathConfig(b *backend) *framework.Path { Value: 10, }, }, - "nas_identifier": &framework.FieldSchema{ + "nas_identifier": { Type: framework.TypeString, Default: "", Description: "RADIUS NAS Identifier field (optional)", diff --git a/builtin/credential/radius/path_login.go b/builtin/credential/radius/path_login.go index 75b088e653..c8a1ab8f43 100644 --- a/builtin/credential/radius/path_login.go +++ b/builtin/credential/radius/path_login.go @@ -21,17 +21,17 @@ func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login" + framework.OptionalParamRegex("urlusername"), Fields: map[string]*framework.FieldSchema{ - "urlusername": &framework.FieldSchema{ + "urlusername": { Type: framework.TypeString, Description: "Username to be used for login. (URL parameter)", }, - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username to be used for login. 
(POST request body)", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password for this user.", }, diff --git a/builtin/credential/radius/path_users.go b/builtin/credential/radius/path_users.go index 470513f4db..de7b5d4690 100644 --- a/builtin/credential/radius/path_users.go +++ b/builtin/credential/radius/path_users.go @@ -31,12 +31,12 @@ func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the RADIUS user.", }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the user.", }, @@ -116,8 +116,7 @@ func (b *backend) pathUserRead(ctx context.Context, req *logical.Request, d *fra } func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - - var policies = policyutil.ParsePolicies(d.Get("policies")) + policies := policyutil.ParsePolicies(d.Get("policies")) for _, policy := range policies { if policy == "root" { return logical.ErrorResponse("root policy cannot be granted by an auth method"), nil diff --git a/builtin/credential/token/cli.go b/builtin/credential/token/cli.go index cce81e7de6..7fd115f5af 100644 --- a/builtin/credential/token/cli.go +++ b/builtin/credential/token/cli.go @@ -134,7 +134,6 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro Renewable: renewable, }, }, nil - } func (h *CLIHandler) Help() string { diff --git a/builtin/credential/userpass/backend_test.go b/builtin/credential/userpass/backend_test.go index e020311411..546b9d1554 100644 --- a/builtin/credential/userpass/backend_test.go +++ b/builtin/credential/userpass/backend_test.go @@ -2,13 +2,12 @@ package userpass import ( "context" + "crypto/tls" "fmt" "reflect" "testing" "time" - "crypto/tls" - "github.com/go-test/deep" sockaddr "github.com/hashicorp/go-sockaddr" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" @@ -78,7 +77,7 @@ func TestBackend_CRUD(t *testing.T) { if diff := deep.Equal(resp.Data["token_policies"], []string{"foo"}); diff != nil { t.Fatal(diff) } - if diff := deep.Equal(resp.Data["token_bound_cidrs"], []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{localhostSockAddr}}); diff != nil { + if diff := deep.Equal(resp.Data["token_bound_cidrs"], []*sockaddr.SockAddrMarshaler{{localhostSockAddr}}); diff != nil { t.Fatal(diff) } @@ -124,10 +123,10 @@ func TestBackend_CRUD(t *testing.T) { if diff := deep.Equal(resp.Data["token_policies"], []string{"bar"}); diff != nil { t.Fatal(diff) } - if diff := deep.Equal(resp.Data["bound_cidrs"], []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{localhostSockAddr}}); diff != nil { + if diff := deep.Equal(resp.Data["bound_cidrs"], []*sockaddr.SockAddrMarshaler{{localhostSockAddr}}); diff != nil { t.Fatal(diff) } - if diff := deep.Equal(resp.Data["token_bound_cidrs"], []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{localhostSockAddr}}); diff != nil { + if diff := deep.Equal(resp.Data["token_bound_cidrs"], []*sockaddr.SockAddrMarshaler{{localhostSockAddr}}); diff != nil { t.Fatal(diff) } } @@ -221,7 +220,6 @@ func TestBackend_passwordUpdate(t *testing.T) { testAccStepLogin(t, "web", "newpassword", []string{"default", "foo"}), }, }) - } func TestBackend_policiesUpdate(t *testing.T) { @@ -247,7 +245,6 
@@ func TestBackend_policiesUpdate(t *testing.T) { testAccStepLogin(t, "web", "password", []string{"bar", "default", "foo"}), }, }) - } func testUpdatePassword(t *testing.T, user, password string) logicaltest.TestStep { @@ -382,7 +379,7 @@ func TestBackend_UserUpgrade(t *testing.T) { Policies: []string{"foo"}, TTL: time.Second, MaxTTL: time.Second, - BoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, } entry, err := logical.StorageEntryJSON("user/foo", foo) @@ -403,12 +400,12 @@ func TestBackend_UserUpgrade(t *testing.T) { Policies: []string{"foo"}, TTL: time.Second, MaxTTL: time.Second, - BoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, TokenParams: tokenutil.TokenParams{ TokenPolicies: []string{"foo"}, TokenTTL: time.Second, TokenMaxTTL: time.Second, - TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{&sockaddr.SockAddrMarshaler{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, }, } if diff := deep.Equal(userEntry, exp); diff != nil { diff --git a/builtin/credential/userpass/path_login.go b/builtin/credential/userpass/path_login.go index 2c10e0f68e..95d63f28c6 100644 --- a/builtin/credential/userpass/path_login.go +++ b/builtin/credential/userpass/path_login.go @@ -17,12 +17,12 @@ func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login/" + framework.GenericNameRegex("username"), Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username of the user.", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password for this user.", }, diff --git a/builtin/credential/userpass/path_user_password.go b/builtin/credential/userpass/path_user_password.go index e3c465d2da..5007497304 100644 --- a/builtin/credential/userpass/path_user_password.go +++ b/builtin/credential/userpass/path_user_password.go @@ -14,12 +14,12 @@ func pathUserPassword(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username") + "/password$", Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username for this user.", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password for this user.", }, diff --git a/builtin/credential/userpass/path_user_policies.go b/builtin/credential/userpass/path_user_policies.go index dadc5480cc..3c01725386 100644 --- a/builtin/credential/userpass/path_user_policies.go +++ b/builtin/credential/userpass/path_user_policies.go @@ -14,16 +14,16 @@ func pathUserPolicies(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username") + "/policies$", Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username for this user.", }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: tokenutil.DeprecationText("token_policies"), Deprecated: true, }, - "token_policies": 
&framework.FieldSchema{ + "token_policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", }, diff --git a/builtin/credential/userpass/path_users.go b/builtin/credential/userpass/path_users.go index c6b0514efd..7ec22c5fbd 100644 --- a/builtin/credential/userpass/path_users.go +++ b/builtin/credential/userpass/path_users.go @@ -33,12 +33,12 @@ func pathUsers(b *backend) *framework.Path { p := &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username"), Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username for this user.", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password for this user.", DisplayAttrs: &framework.DisplayAttributes{ @@ -46,25 +46,25 @@ func pathUsers(b *backend) *framework.Path { }, }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: tokenutil.DeprecationText("token_policies"), Deprecated: true, }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_ttl"), Deprecated: true, }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Description: tokenutil.DeprecationText("token_max_ttl"), Deprecated: true, }, - "bound_cidrs": &framework.FieldSchema{ + "bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: tokenutil.DeprecationText("token_bound_cidrs"), Deprecated: true, diff --git a/builtin/logical/aws/backend_test.go b/builtin/logical/aws/backend_test.go index 2740043993..c8823c829c 100644 --- a/builtin/logical/aws/backend_test.go +++ b/builtin/logical/aws/backend_test.go @@ -216,7 +216,6 @@ func getAccountID() (string, error) { params := &sts.GetCallerIdentityInput{} res, err := svc.GetCallerIdentity(params) - if err != nil { return "", err } @@ -912,10 +911,12 @@ const testS3Policy = `{ ] }` -const adminAccessPolicyArn = "arn:aws:iam::aws:policy/AdministratorAccess" -const ec2PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" -const iamPolicyArn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" -const dynamoPolicyArn = "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess" +const ( + adminAccessPolicyArn = "arn:aws:iam::aws:policy/AdministratorAccess" + ec2PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" + iamPolicyArn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + dynamoPolicyArn = "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess" +) func testAccStepWriteRole(t *testing.T, name string, data map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ diff --git a/builtin/logical/aws/iam_policies.go b/builtin/logical/aws/iam_policies.go index a8c134de60..caf79e33d3 100644 --- a/builtin/logical/aws/iam_policies.go +++ b/builtin/logical/aws/iam_policies.go @@ -113,7 +113,7 @@ func combinePolicyDocuments(policies ...string) (string, error) { var policy string var err error var policyBytes []byte - var newPolicy = PolicyDocument{ + newPolicy := PolicyDocument{ // 2012-10-17 is the current version of the AWS policy language: // https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html Version: "2012-10-17", diff --git a/builtin/logical/aws/path_config_lease.go b/builtin/logical/aws/path_config_lease.go index 1966ecb73b..b953b2305e 100644 --- a/builtin/logical/aws/path_config_lease.go +++ b/builtin/logical/aws/path_config_lease.go 
@@ -13,12 +13,12 @@ func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", Fields: map[string]*framework.FieldSchema{ - "lease": &framework.FieldSchema{ + "lease": { Type: framework.TypeString, Description: "Default lease for roles.", }, - "lease_max": &framework.FieldSchema{ + "lease_max": { Type: framework.TypeString, Description: "Maximum time a credential is valid for.", }, @@ -91,7 +91,6 @@ func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *f func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { lease, err := b.Lease(ctx, req.Storage) - if err != nil { return nil, err } diff --git a/builtin/logical/aws/path_config_root.go b/builtin/logical/aws/path_config_root.go index 4c854121a6..fa2f59f646 100644 --- a/builtin/logical/aws/path_config_root.go +++ b/builtin/logical/aws/path_config_root.go @@ -12,29 +12,29 @@ func pathConfigRoot(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/root", Fields: map[string]*framework.FieldSchema{ - "access_key": &framework.FieldSchema{ + "access_key": { Type: framework.TypeString, Description: "Access key with permission to create new keys.", }, - "secret_key": &framework.FieldSchema{ + "secret_key": { Type: framework.TypeString, Description: "Secret key with permission to create new keys.", }, - "region": &framework.FieldSchema{ + "region": { Type: framework.TypeString, Description: "Region for API calls.", }, - "iam_endpoint": &framework.FieldSchema{ + "iam_endpoint": { Type: framework.TypeString, Description: "Endpoint to custom IAM server URL", }, - "sts_endpoint": &framework.FieldSchema{ + "sts_endpoint": { Type: framework.TypeString, Description: "Endpoint to custom STS server URL", }, - "max_retries": &framework.FieldSchema{ + "max_retries": { Type: framework.TypeInt, Default: aws.UseServiceDefaultRetries, Description: "Maximum number of retries for recoverable exceptions of AWS APIs", diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go index e7dc241b69..3ef32f2d1d 100644 --- a/builtin/logical/aws/path_roles.go +++ b/builtin/logical/aws/path_roles.go @@ -18,9 +18,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -var ( - userPathRegex = regexp.MustCompile(`^\/([\x21-\x7F]{0,510}\/)?$`) -) +var userPathRegex = regexp.MustCompile(`^\/([\x21-\x7F]{0,510}\/)?$`) func pathListRoles(b *backend) *framework.Path { return &framework.Path{ @@ -39,7 +37,7 @@ func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameWithAtRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the policy", DisplayAttrs: &framework.DisplayAttributes{ @@ -47,12 +45,12 @@ func pathRoles(b *backend) *framework.Path { }, }, - "credential_type": &framework.FieldSchema{ + "credential_type": { Type: framework.TypeString, Description: fmt.Sprintf("Type of credential to retrieve. Must be one of %s, %s, or %s", assumedRoleCred, iamUserCred, federationTokenCred), }, - "role_arns": &framework.FieldSchema{ + "role_arns": { Type: framework.TypeCommaStringSlice, Description: "ARNs of AWS roles allowed to be assumed. 
Only valid when credential_type is " + assumedRoleCred, DisplayAttrs: &framework.DisplayAttributes{ @@ -60,7 +58,7 @@ func pathRoles(b *backend) *framework.Path { }, }, - "policy_arns": &framework.FieldSchema{ + "policy_arns": { Type: framework.TypeCommaStringSlice, Description: fmt.Sprintf(`ARNs of AWS policies. Behavior varies by credential_type. When credential_type is %s, then it will attach the specified policies to the generated IAM user. @@ -71,7 +69,7 @@ PolicyArns parameter, acting as a filter on permissions available.`, iamUserCred }, }, - "policy_document": &framework.FieldSchema{ + "policy_document": { Type: framework.TypeString, Description: `JSON-encoded IAM policy document. Behavior varies by credential_type. When credential_type is iam_user, then it will attach the contents of the policy_document to the IAM @@ -80,7 +78,7 @@ will be passed in as the Policy parameter to the AssumeRole or GetFederationToken API call, acting as a filter on permissions available.`, }, - "iam_groups": &framework.FieldSchema{ + "iam_groups": { Type: framework.TypeCommaStringSlice, Description: `Names of IAM groups that generated IAM users will be added to. For a credential type of assumed_role or federation_token, the policies sent to the @@ -93,7 +91,7 @@ and policy_arns parameters.`, }, }, - "iam_tags": &framework.FieldSchema{ + "iam_tags": { Type: framework.TypeKVPairs, Description: `IAM tags to be set for any users created by this role. These must be presented as Key-Value pairs. This can be represented as a map or a list of equal sign @@ -104,7 +102,7 @@ delimited key pairs.`, }, }, - "default_sts_ttl": &framework.FieldSchema{ + "default_sts_ttl": { Type: framework.TypeDurationSecond, Description: fmt.Sprintf("Default TTL for %s and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred), DisplayAttrs: &framework.DisplayAttributes{ @@ -112,7 +110,7 @@ delimited key pairs.`, }, }, - "max_sts_ttl": &framework.FieldSchema{ + "max_sts_ttl": { Type: framework.TypeDurationSecond, Description: fmt.Sprintf("Max allowed TTL for %s and %s credential types", assumedRoleCred, federationTokenCred), DisplayAttrs: &framework.DisplayAttributes{ @@ -120,7 +118,7 @@ delimited key pairs.`, }, }, - "permissions_boundary_arn": &framework.FieldSchema{ + "permissions_boundary_arn": { Type: framework.TypeString, Description: "ARN of an IAM policy to attach as a permissions boundary on IAM user credentials; only valid when credential_type is" + iamUserCred, DisplayAttrs: &framework.DisplayAttributes{ @@ -128,19 +126,19 @@ delimited key pairs.`, }, }, - "arn": &framework.FieldSchema{ + "arn": { Type: framework.TypeString, Description: `Use role_arns or policy_arns instead.`, Deprecated: true, }, - "policy": &framework.FieldSchema{ + "policy": { Type: framework.TypeString, Description: "Use policy_document instead.", Deprecated: true, }, - "user_path": &framework.FieldSchema{ + "user_path": { Type: framework.TypeString, Description: "Path for IAM User. 
Only valid when credential_type is " + iamUserCred, DisplayAttrs: &framework.DisplayAttributes{ diff --git a/builtin/logical/aws/path_roles_test.go b/builtin/logical/aws/path_roles_test.go index d280dfcd0d..39c9d90811 100644 --- a/builtin/logical/aws/path_roles_test.go +++ b/builtin/logical/aws/path_roles_test.go @@ -159,7 +159,6 @@ func TestUpgradeLegacyPolicyEntry(t *testing.T) { } func TestUserPathValidity(t *testing.T) { - testCases := []struct { description string userPath string @@ -339,7 +338,7 @@ func TestRoleEntryValidationCredTypes(t *testing.T) { } func TestRoleEntryValidationIamUserCred(t *testing.T) { - var allowAllPolicyDocument = `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` + allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` roleEntry := awsRoleEntry{ CredentialTypes: []string{iamUserCred}, PolicyArns: []string{adminAccessPolicyARN}, @@ -384,7 +383,7 @@ func TestRoleEntryValidationIamUserCred(t *testing.T) { } func TestRoleEntryValidationAssumedRoleCred(t *testing.T) { - var allowAllPolicyDocument = `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` + allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` roleEntry := awsRoleEntry{ CredentialTypes: []string{assumedRoleCred}, RoleArns: []string{"arn:aws:iam::123456789012:role/SomeRole"}, @@ -414,7 +413,7 @@ func TestRoleEntryValidationAssumedRoleCred(t *testing.T) { } func TestRoleEntryValidationFederationTokenCred(t *testing.T) { - var allowAllPolicyDocument = `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` + allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` roleEntry := awsRoleEntry{ CredentialTypes: []string{federationTokenCred}, PolicyDocument: allowAllPolicyDocument, @@ -446,5 +445,4 @@ func TestRoleEntryValidationFederationTokenCred(t *testing.T) { if roleEntry.validate() == nil { t.Errorf("bad: invalid roleEntry with unrecognized PermissionsBoundary %#v passed validation", roleEntry) } - } diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go index c9b43e97ef..7645d395f6 100644 --- a/builtin/logical/aws/path_user.go +++ b/builtin/logical/aws/path_user.go @@ -20,15 +20,15 @@ func pathUser(b *backend) *framework.Path { return &framework.Path{ Pattern: "(creds|sts)/" + framework.GenericNameWithAtRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role", }, - "role_arn": &framework.FieldSchema{ + "role_arn": { Type: framework.TypeString, Description: "ARN of role to assume when credential_type is " + assumedRoleCred, }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: "Lifetime of the returned credentials in seconds", Default: 3600, diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go index 3a4a3f0afa..22b4ad3fca 100644 --- a/builtin/logical/aws/secret_access_keys.go +++ b/builtin/logical/aws/secret_access_keys.go @@ -23,16 +23,16 @@ func secretAccessKeys(b *backend) *framework.Secret { return &framework.Secret{ Type: secretAccessKeyType, 
Fields: map[string]*framework.FieldSchema{ - "access_key": &framework.FieldSchema{ + "access_key": { Type: framework.TypeString, Description: "Access Key", }, - "secret_key": &framework.FieldSchema{ + "secret_key": { Type: framework.TypeString, Description: "Secret Key", }, - "security_token": &framework.FieldSchema{ + "security_token": { Type: framework.TypeString, Description: "Security Token", }, @@ -112,7 +112,6 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage, } tokenResp, err := stsClient.GetFederationToken(getTokenInput) - if err != nil { return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err) } @@ -180,7 +179,6 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage, assumeRoleInput.SetPolicyArns(convertPolicyARNs(policyARNs)) } tokenResp, err := stsClient.AssumeRole(assumeRoleInput) - if err != nil { return logical.ErrorResponse("Error assuming role: %s", err), awsutil.CheckAWSError(err) } @@ -376,7 +374,6 @@ func (b *backend) secretAccessKeysRenew(ctx context.Context, req *logical.Reques } func (b *backend) secretAccessKeysRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // STS cleans up after itself so we can skip this if is_sts internal data // element set to true. If is_sts is not set, assumes old version // and defaults to the IAM approach. diff --git a/builtin/logical/aws/secret_access_keys_test.go b/builtin/logical/aws/secret_access_keys_test.go index c7a19dec11..22f780a885 100644 --- a/builtin/logical/aws/secret_access_keys_test.go +++ b/builtin/logical/aws/secret_access_keys_test.go @@ -5,7 +5,6 @@ import ( ) func TestNormalizeDisplayName_NormRequired(t *testing.T) { - invalidNames := map[string]string{ "^#$test name\nshould be normalized)(*": "___test_name_should_be_normalized___", "^#$test name1 should be normalized)(*": "___test_name1_should_be_normalized___", @@ -25,7 +24,6 @@ func TestNormalizeDisplayName_NormRequired(t *testing.T) { } func TestNormalizeDisplayName_NormNotRequired(t *testing.T) { - validNames := []string{ "test_name_should_normalize_to_itself@example.com", "test1_name_should_normalize_to_itself@example.com", diff --git a/builtin/logical/cassandra/backend.go b/builtin/logical/cassandra/backend.go index 8dd6a83d93..e7087448d1 100644 --- a/builtin/logical/cassandra/backend.go +++ b/builtin/logical/cassandra/backend.go @@ -105,7 +105,6 @@ func (b *backend) DB(ctx context.Context, s logical.Storage) (*gocql.Session, er b.session = session return session, err - } // ResetDB forces a connection next time DB() is called. 
diff --git a/builtin/logical/cassandra/path_config_connection.go b/builtin/logical/cassandra/path_config_connection.go index f3e03a5b3a..db551be989 100644 --- a/builtin/logical/cassandra/path_config_connection.go +++ b/builtin/logical/cassandra/path_config_connection.go @@ -14,47 +14,47 @@ func pathConfigConnection(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/connection", Fields: map[string]*framework.FieldSchema{ - "hosts": &framework.FieldSchema{ + "hosts": { Type: framework.TypeString, Description: "Comma-separated list of hosts", }, - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "The username to use for connecting to the cluster", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "The password to use for connecting to the cluster", }, - "tls": &framework.FieldSchema{ + "tls": { Type: framework.TypeBool, Description: `Whether to use TLS. If pem_bundle or pem_json are set, this is automatically set to true`, }, - "insecure_tls": &framework.FieldSchema{ + "insecure_tls": { Type: framework.TypeBool, Description: `Whether to use TLS but skip verification; has no effect if a CA certificate is provided`, }, // TLS 1.3 is not supported as this engine is deprecated. Please switch to the Cassandra database secrets engine - "tls_min_version": &framework.FieldSchema{ + "tls_min_version": { Type: framework.TypeString, Default: "tls12", Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'", }, - "pem_bundle": &framework.FieldSchema{ + "pem_bundle": { Type: framework.TypeString, Description: `PEM-format, concatenated unencrypted secret key and certificate, with optional CA certificate`, }, - "pem_json": &framework.FieldSchema{ + "pem_json": { Type: framework.TypeString, Description: `JSON containing a PEM-format, unencrypted secret key and certificate, with optional CA certificate. @@ -64,12 +64,12 @@ If both this and "pem_bundle" are specified, this will take precedence.`, }, - "protocol_version": &framework.FieldSchema{ + "protocol_version": { Type: framework.TypeInt, Description: `The protocol version to use. Defaults to 2.`, }, - "connect_timeout": &framework.FieldSchema{ + "connect_timeout": { Type: framework.TypeDurationSecond, Default: 5, Description: `The connection timeout to use. 
Defaults to 5.`, diff --git a/builtin/logical/cassandra/path_creds_create.go b/builtin/logical/cassandra/path_creds_create.go index 8808aa9e24..f6505cfb69 100644 --- a/builtin/logical/cassandra/path_creds_create.go +++ b/builtin/logical/cassandra/path_creds_create.go @@ -17,7 +17,7 @@ func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role", }, diff --git a/builtin/logical/cassandra/path_roles.go b/builtin/logical/cassandra/path_roles.go index 5c24853374..df7671e47e 100644 --- a/builtin/logical/cassandra/path_roles.go +++ b/builtin/logical/cassandra/path_roles.go @@ -20,12 +20,12 @@ func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role", }, - "creation_cql": &framework.FieldSchema{ + "creation_cql": { Type: framework.TypeString, Default: defaultCreationCQL, Description: `CQL to create a user and optionally grant @@ -38,7 +38,7 @@ file. Valid template values are '{{username}}' and '{{password}}' -- the single quotes are important!`, }, - "rollback_cql": &framework.FieldSchema{ + "rollback_cql": { Type: framework.TypeString, Default: defaultRollbackCQL, Description: `CQL to roll back an account operation. This will @@ -51,13 +51,13 @@ template values are '{{username}}' and '{{password}}' -- the single quotes are important!`, }, - "lease": &framework.FieldSchema{ + "lease": { Type: framework.TypeString, Default: "4h", Description: "The lease length; defaults to 4 hours", }, - "consistency": &framework.FieldSchema{ + "consistency": { Type: framework.TypeString, Default: "Quorum", Description: "The consistency level for the operations; defaults to Quorum.", diff --git a/builtin/logical/cassandra/secret_creds.go b/builtin/logical/cassandra/secret_creds.go index 5313655967..dfecc7a33b 100644 --- a/builtin/logical/cassandra/secret_creds.go +++ b/builtin/logical/cassandra/secret_creds.go @@ -16,12 +16,12 @@ func secretCreds(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretCredsType, Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password", }, diff --git a/builtin/logical/consul/backend_test.go b/builtin/logical/consul/backend_test.go index 69dcee7ec9..11b90b4f31 100644 --- a/builtin/logical/consul/backend_test.go +++ b/builtin/logical/consul/backend_test.go @@ -198,7 +198,6 @@ func testBackendRenewRevoke(t *testing.T, version string) { if err == nil { t.Fatal("expected error") } - } func testBackendRenewRevoke14(t *testing.T, version string) { diff --git a/builtin/logical/consul/path_config.go b/builtin/logical/consul/path_config.go index 80f5c53dae..f9f1eb9139 100644 --- a/builtin/logical/consul/path_config.go +++ b/builtin/logical/consul/path_config.go @@ -13,12 +13,12 @@ func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", Fields: map[string]*framework.FieldSchema{ - "address": &framework.FieldSchema{ + "address": { Type: framework.TypeString, Description: "Consul server address", }, - "scheme": 
&framework.FieldSchema{ + "scheme": { Type: framework.TypeString, Description: "URI scheme for the Consul address", @@ -28,24 +28,24 @@ func pathConfigAccess(b *backend) *framework.Path { Default: "http", }, - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token for API calls", }, - "ca_cert": &framework.FieldSchema{ + "ca_cert": { Type: framework.TypeString, Description: `CA certificate to use when verifying Consul server certificate, must be x509 PEM encoded.`, }, - "client_cert": &framework.FieldSchema{ + "client_cert": { Type: framework.TypeString, Description: `Client certificate used for Consul's TLS communication, must be x509 PEM encoded and if this is set you need to also set client_key.`, }, - "client_key": &framework.FieldSchema{ + "client_key": { Type: framework.TypeString, Description: `Client key used for Consul's TLS communication, must be x509 PEM encoded and if this is set you need to also set client_cert.`, diff --git a/builtin/logical/consul/path_roles.go b/builtin/logical/consul/path_roles.go index 55c713c483..cc92c7d0e6 100644 --- a/builtin/logical/consul/path_roles.go +++ b/builtin/logical/consul/path_roles.go @@ -24,30 +24,30 @@ func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role", }, - "policy": &framework.FieldSchema{ + "policy": { Type: framework.TypeString, Description: `Policy document, base64 encoded. Required for 'client' tokens. Required for Consul pre-1.4.`, }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: `List of policies to attach to the token. Required for Consul 1.4 or above.`, }, - "local": &framework.FieldSchema{ + "local": { Type: framework.TypeBool, Description: `Indicates that the token should not be replicated globally and instead be local to the current datacenter. Available in Consul 1.4 and above.`, }, - "token_type": &framework.FieldSchema{ + "token_type": { Type: framework.TypeString, Default: "client", Description: `Which type of token to create: 'client' @@ -56,17 +56,17 @@ the "policy" parameter is not required. 
Defaults to 'client'.`, }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: "TTL for the Consul token created from the role.", }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Description: "Max TTL for the Consul token created from the role.", }, - "lease": &framework.FieldSchema{ + "lease": { Type: framework.TypeDurationSecond, Description: "Use ttl instead.", Deprecated: true, diff --git a/builtin/logical/consul/path_token.go b/builtin/logical/consul/path_token.go index f9fafc6113..dafc2f6122 100644 --- a/builtin/logical/consul/path_token.go +++ b/builtin/logical/consul/path_token.go @@ -19,7 +19,7 @@ func pathToken(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("role"), Fields: map[string]*framework.FieldSchema{ - "role": &framework.FieldSchema{ + "role": { Type: framework.TypeString, Description: "Name of the role", }, @@ -90,8 +90,8 @@ func (b *backend) pathTokenRead(ctx context.Context, req *logical.Request, d *fr return s, nil } - //Create an ACLToken for Consul 1.4 and above - var policyLink = []*api.ACLTokenPolicyLink{} + // Create an ACLToken for Consul 1.4 and above + policyLink := []*api.ACLTokenPolicyLink{} for _, policyName := range result.Policies { policyLink = append(policyLink, &api.ACLTokenPolicyLink{ Name: policyName, diff --git a/builtin/logical/consul/secret_token.go b/builtin/logical/consul/secret_token.go index 8bc09d426f..f6fbb59ae8 100644 --- a/builtin/logical/consul/secret_token.go +++ b/builtin/logical/consul/secret_token.go @@ -17,7 +17,7 @@ func secretToken(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretTokenType, Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Request token", }, diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go index 371d08adc4..83e8b87c3a 100644 --- a/builtin/logical/database/backend_test.go +++ b/builtin/logical/database/backend_test.go @@ -93,7 +93,6 @@ func TestBackend_PluginMain_MongoAtlas(t *testing.T) { } func TestBackend_RoleUpgrade(t *testing.T) { - storage := &logical.InmemStorage{} backend := &databaseBackend{} @@ -143,7 +142,6 @@ func TestBackend_RoleUpgrade(t *testing.T) { if !reflect.DeepEqual(role, roleExpected) { t.Fatalf("bad role %#v, %#v", role, roleExpected) } - } func TestBackend_config_connection(t *testing.T) { @@ -1025,6 +1023,7 @@ func TestBackend_roleCrud(t *testing.T) { t.Fatal("Expected response to be nil") } } + func TestBackend_allowedRoles(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() @@ -1334,7 +1333,6 @@ func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool { } log.Printf("[TRACE] Generated credentials: %v", d) conn, err := pq.ParseURL(connURL) - if err != nil { t.Fatal(err) } diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go index 2f0667b3dd..754f82b40f 100644 --- a/builtin/logical/database/dbplugin/plugin_test.go +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -39,6 +39,7 @@ func (m *mockPlugin) CreateUser(_ context.Context, statements dbplugin.Statement return usernameConf.DisplayName, "test", nil } + func (m *mockPlugin) RenewUser(_ context.Context, statements dbplugin.Statements, username string, expiration time.Time) error { err := errors.New("err") if username == "" || expiration.IsZero() { @@ 
-51,6 +52,7 @@ func (m *mockPlugin) RenewUser(_ context.Context, statements dbplugin.Statements return nil } + func (m *mockPlugin) RevokeUser(_ context.Context, statements dbplugin.Statements, username string) error { err := errors.New("err") if username == "" { @@ -64,9 +66,11 @@ func (m *mockPlugin) RevokeUser(_ context.Context, statements dbplugin.Statement delete(m.users, username) return nil } + func (m *mockPlugin) RotateRootCredentials(_ context.Context, statements []string) (map[string]interface{}, error) { return nil, nil } + func (m *mockPlugin) Init(_ context.Context, conf map[string]interface{}, _ bool) (map[string]interface{}, error) { err := errors.New("err") if len(conf) != 1 { @@ -75,6 +79,7 @@ func (m *mockPlugin) Init(_ context.Context, conf map[string]interface{}, _ bool return conf, nil } + func (m *mockPlugin) Initialize(_ context.Context, conf map[string]interface{}, _ bool) error { err := errors.New("err") if len(conf) != 1 { @@ -83,6 +88,7 @@ func (m *mockPlugin) Initialize(_ context.Context, conf map[string]interface{}, return nil } + func (m *mockPlugin) Close() error { m.users = nil return nil diff --git a/builtin/logical/database/path_config_connection.go b/builtin/logical/database/path_config_connection.go index 31eacf0a5f..80702bb2d7 100644 --- a/builtin/logical/database/path_config_connection.go +++ b/builtin/logical/database/path_config_connection.go @@ -39,7 +39,7 @@ func pathResetConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of this database connection", }, @@ -83,40 +83,40 @@ func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of this database connection", }, - "plugin_name": &framework.FieldSchema{ + "plugin_name": { Type: framework.TypeString, Description: `The name of a builtin or previously registered plugin known to vault. This endpoint will create an instance of that plugin type.`, }, - "verify_connection": &framework.FieldSchema{ + "verify_connection": { Type: framework.TypeBool, Default: true, Description: `If true, the connection details are verified by actually connecting to the database. Defaults to true.`, }, - "allowed_roles": &framework.FieldSchema{ + "allowed_roles": { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or array of the role names allowed to get creds from this database connection. If empty no roles are allowed. If "*" all roles are allowed.`, }, - "root_rotation_statements": &framework.FieldSchema{ + "root_rotation_statements": { Type: framework.TypeStringSlice, Description: `Specifies the database statements to be executed to rotate the root user's credentials. 
See the plugin's API page for more information on support and formatting for this parameter.`, }, - "password_policy": &framework.FieldSchema{ + "password_policy": { Type: framework.TypeString, Description: `Password policy to use when generating passwords.`, }, diff --git a/builtin/logical/database/path_creds_create.go b/builtin/logical/database/path_creds_create.go index 1422f1934b..d4fbca6c39 100644 --- a/builtin/logical/database/path_creds_create.go +++ b/builtin/logical/database/path_creds_create.go @@ -13,10 +13,10 @@ import ( func pathCredsCreate(b *databaseBackend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: "creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role.", }, @@ -29,10 +29,10 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { HelpSynopsis: pathCredsCreateReadHelpSyn, HelpDescription: pathCredsCreateReadHelpDesc, }, - &framework.Path{ + { Pattern: "static-creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the static role.", }, diff --git a/builtin/logical/database/path_roles.go b/builtin/logical/database/path_roles.go index dcaeb23432..3aefe82267 100644 --- a/builtin/logical/database/path_roles.go +++ b/builtin/logical/database/path_roles.go @@ -16,7 +16,7 @@ import ( func pathListRoles(b *databaseBackend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: "roles/?$", Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -26,7 +26,7 @@ func pathListRoles(b *databaseBackend) []*framework.Path { HelpSynopsis: pathRoleHelpSyn, HelpDescription: pathRoleHelpDesc, }, - &framework.Path{ + { Pattern: "static-roles/?$", Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -41,7 +41,7 @@ func pathListRoles(b *databaseBackend) []*framework.Path { func pathRoles(b *databaseBackend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: "roles/" + framework.GenericNameRegex("name"), Fields: fieldsForType(databaseRolePath), ExistenceCheck: b.pathRoleExistenceCheck, @@ -56,7 +56,7 @@ func pathRoles(b *databaseBackend) []*framework.Path { HelpDescription: pathRoleHelpDesc, }, - &framework.Path{ + { Pattern: "static-roles/" + framework.GenericNameRegex("name"), Fields: fieldsForType(databaseStaticRolePath), ExistenceCheck: b.pathStaticRoleExistenceCheck, diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go index f13f974fe4..84ed3db8d3 100644 --- a/builtin/logical/database/path_rotate_credentials.go +++ b/builtin/logical/database/path_rotate_credentials.go @@ -13,10 +13,10 @@ import ( func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: "rotate-root/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of this database connection", }, @@ -33,10 +33,10 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { HelpSynopsis: pathCredsCreateReadHelpSyn, HelpDescription: pathCredsCreateReadHelpDesc, }, - &framework.Path{ + { Pattern: "rotate-role/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": 
&framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the static role", }, @@ -211,6 +211,7 @@ This path attempts to rotate the root credentials for the given database. const pathRotateRoleCredentialsUpdateHelpSyn = ` Request to rotate the credentials for a static user account. ` + const pathRotateRoleCredentialsUpdateHelpDesc = ` This path attempts to rotate the credentials for the given static user account. ` diff --git a/builtin/logical/database/version_wrapper.go b/builtin/logical/database/version_wrapper.go index a2341a89a2..d1b5359717 100644 --- a/builtin/logical/database/version_wrapper.go +++ b/builtin/logical/database/version_wrapper.go @@ -237,9 +237,7 @@ type passwordGenerator interface { GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) } -var ( - defaultPasswordGenerator = random.DefaultStringGenerator -) +var defaultPasswordGenerator = random.DefaultStringGenerator // GeneratePassword either from the v4 database or by using the provided password policy. If using a v5 database // and no password policy is specified, this will have a reasonable default password generator. diff --git a/builtin/logical/database/version_wrapper_test.go b/builtin/logical/database/version_wrapper_test.go index e0e4cbae8b..56ec37d029 100644 --- a/builtin/logical/database/version_wrapper_test.go +++ b/builtin/logical/database/version_wrapper_test.go @@ -927,9 +927,11 @@ func (f fakeStorage) Put(ctx context.Context, entry *logical.StorageEntry) error func (f fakeStorage) List(ctx context.Context, s string) ([]string, error) { panic("list not implemented") } + func (f fakeStorage) Get(ctx context.Context, s string) (*logical.StorageEntry, error) { panic("get not implemented") } + func (f fakeStorage) Delete(ctx context.Context, s string) error { panic("delete not implemented") } diff --git a/builtin/logical/mongodb/backend_test.go b/builtin/logical/mongodb/backend_test.go index 3c116c69b6..1b85fef03c 100644 --- a/builtin/logical/mongodb/backend_test.go +++ b/builtin/logical/mongodb/backend_test.go @@ -14,9 +14,7 @@ import ( "github.com/mitchellh/mapstructure" ) -var ( - testImagePull sync.Once -) +var testImagePull sync.Once func TestBackend_config_connection(t *testing.T) { var resp *logical.Response @@ -123,7 +121,6 @@ func TestBackend_leaseWriteRead(t *testing.T) { testAccStepReadLease(), }, }) - } func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep { @@ -265,5 +262,7 @@ func testAccStepReadLease() logicaltest.TestStep { } } -const testDb = "foo" -const testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]` +const ( + testDb = "foo" + testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]` +) diff --git a/builtin/logical/mongodb/path_config_lease.go b/builtin/logical/mongodb/path_config_lease.go index b7fd3bfc6a..c64a4d1d89 100644 --- a/builtin/logical/mongodb/path_config_lease.go +++ b/builtin/logical/mongodb/path_config_lease.go @@ -50,7 +50,6 @@ func (b *backend) pathConfigLeaseWrite(ctx context.Context, req *logical.Request func (b *backend) pathConfigLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { leaseConfig, err := b.LeaseConfig(ctx, req.Storage) - if err != nil { return nil, err } diff --git a/builtin/logical/mssql/backend_test.go b/builtin/logical/mssql/backend_test.go index 1c67033115..afb239c2d3 100644 --- a/builtin/logical/mssql/backend_test.go +++ b/builtin/logical/mssql/backend_test.go @@ -105,7 +105,6 
@@ func TestBackend_leaseWriteRead(t *testing.T) { testAccStepReadLease(t), }, }) - } func testAccPreCheckFunc(t *testing.T, connectionURL string) func() { diff --git a/builtin/logical/mssql/path_config_connection.go b/builtin/logical/mssql/path_config_connection.go index 933649a73d..f0ad63108e 100644 --- a/builtin/logical/mssql/path_config_connection.go +++ b/builtin/logical/mssql/path_config_connection.go @@ -13,15 +13,15 @@ func pathConfigConnection(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/connection", Fields: map[string]*framework.FieldSchema{ - "connection_string": &framework.FieldSchema{ + "connection_string": { Type: framework.TypeString, Description: "DB connection parameters", }, - "max_open_connections": &framework.FieldSchema{ + "max_open_connections": { Type: framework.TypeInt, Description: "Maximum number of open connections to database", }, - "verify_connection": &framework.FieldSchema{ + "verify_connection": { Type: framework.TypeBool, Default: true, Description: "If set, connection_string is verified by actually connecting to the database", @@ -74,7 +74,6 @@ func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, if verifyConnection { // Verify the string db, err := sql.Open("mssql", connString) - if err != nil { return logical.ErrorResponse(fmt.Sprintf( "Error validating connection info: %s", err)), nil diff --git a/builtin/logical/mssql/path_config_lease.go b/builtin/logical/mssql/path_config_lease.go index f64affa209..d0fe86dfbd 100644 --- a/builtin/logical/mssql/path_config_lease.go +++ b/builtin/logical/mssql/path_config_lease.go @@ -13,18 +13,18 @@ func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", Fields: map[string]*framework.FieldSchema{ - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeString, Description: "Default ttl for roles.", }, - "ttl_max": &framework.FieldSchema{ + "ttl_max": { Type: framework.TypeString, Description: `Deprecated: use "max_ttl" instead. 
Maximum time a credential is valid for.`, }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeString, Description: "Maximum time a credential is valid for.", }, @@ -75,7 +75,6 @@ func (b *backend) pathConfigLeaseWrite(ctx context.Context, req *logical.Request func (b *backend) pathConfigLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { leaseConfig, err := b.LeaseConfig(ctx, req.Storage) - if err != nil { return nil, err } diff --git a/builtin/logical/mssql/path_creds_create.go b/builtin/logical/mssql/path_creds_create.go index 0638d333ad..1f8eea6f4c 100644 --- a/builtin/logical/mssql/path_creds_create.go +++ b/builtin/logical/mssql/path_creds_create.go @@ -16,7 +16,7 @@ func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role.", }, diff --git a/builtin/logical/mssql/path_roles.go b/builtin/logical/mssql/path_roles.go index 2f6a0ed640..3332db7b5c 100644 --- a/builtin/logical/mssql/path_roles.go +++ b/builtin/logical/mssql/path_roles.go @@ -27,12 +27,12 @@ func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role.", }, - "sql": &framework.FieldSchema{ + "sql": { Type: framework.TypeString, Description: "SQL string to create a role. See help for more info.", }, diff --git a/builtin/logical/mssql/secret_creds.go b/builtin/logical/mssql/secret_creds.go index 20df1823c5..a6d4e95410 100644 --- a/builtin/logical/mssql/secret_creds.go +++ b/builtin/logical/mssql/secret_creds.go @@ -17,12 +17,12 @@ func secretCreds(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretCredsType, Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password", }, @@ -132,7 +132,6 @@ func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d // many permissions as possible right now var lastStmtError error for _, query := range revokeStmts { - if err := dbtxn.ExecuteDBQuery(ctx, db, nil, query); err != nil { lastStmtError = err continue diff --git a/builtin/logical/mysql/backend_test.go b/builtin/logical/mysql/backend_test.go index be99e73c7d..78615d4eee 100644 --- a/builtin/logical/mysql/backend_test.go +++ b/builtin/logical/mysql/backend_test.go @@ -160,7 +160,6 @@ func TestBackend_leaseWriteRead(t *testing.T) { testAccStepReadLease(t), }, }) - } func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep { @@ -193,7 +192,6 @@ func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) } func testAccStepRole(t *testing.T, wildCard bool) logicaltest.TestStep { - pathData := make(map[string]interface{}) if wildCard == true { pathData = map[string]interface{}{ @@ -211,7 +209,6 @@ func testAccStepRole(t *testing.T, wildCard bool) logicaltest.TestStep { Path: "roles/web", Data: pathData, } - } func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { @@ -298,10 +295,12 @@ const testRoleWildCard = ` CREATE 
USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; GRANT SELECT ON *.* TO '{{name}}'@'%'; ` + const testRoleHost = ` CREATE USER '{{name}}'@'10.1.1.2' IDENTIFIED BY '{{password}}'; GRANT SELECT ON *.* TO '{{name}}'@'10.1.1.2'; ` + const testRevocationSQL = ` REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'10.1.1.2'; DROP USER '{{name}}'@'10.1.1.2'; diff --git a/builtin/logical/mysql/path_config_connection.go b/builtin/logical/mysql/path_config_connection.go index 45d7986284..151fd7c878 100644 --- a/builtin/logical/mysql/path_config_connection.go +++ b/builtin/logical/mysql/path_config_connection.go @@ -14,24 +14,24 @@ func pathConfigConnection(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/connection", Fields: map[string]*framework.FieldSchema{ - "connection_url": &framework.FieldSchema{ + "connection_url": { Type: framework.TypeString, Description: "DB connection string", }, - "value": &framework.FieldSchema{ + "value": { Type: framework.TypeString, Description: `DB connection string. Use 'connection_url' instead. This name is deprecated.`, }, - "max_open_connections": &framework.FieldSchema{ + "max_open_connections": { Type: framework.TypeInt, Description: "Maximum number of open connections to database", }, - "max_idle_connections": &framework.FieldSchema{ + "max_idle_connections": { Type: framework.TypeInt, Description: "Maximum number of idle connections to the database; a zero uses the value of max_open_connections and a negative value disables idle connections. If larger than max_open_connections it will be reduced to the same size.", }, - "verify_connection": &framework.FieldSchema{ + "verify_connection": { Type: framework.TypeBool, Default: true, Description: "If set, connection_url is verified by actually connecting to the database", @@ -100,7 +100,6 @@ func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, if verifyConnection { // Verify the string db, err := sql.Open("mysql", connURL) - if err != nil { return logical.ErrorResponse(fmt.Sprintf( "error validating connection info: %s", err)), nil diff --git a/builtin/logical/mysql/path_config_lease.go b/builtin/logical/mysql/path_config_lease.go index 9538a20280..e8b0543e01 100644 --- a/builtin/logical/mysql/path_config_lease.go +++ b/builtin/logical/mysql/path_config_lease.go @@ -13,12 +13,12 @@ func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", Fields: map[string]*framework.FieldSchema{ - "lease": &framework.FieldSchema{ + "lease": { Type: framework.TypeString, Description: "Default lease for roles.", }, - "lease_max": &framework.FieldSchema{ + "lease_max": { Type: framework.TypeString, Description: "Maximum time a credential is valid for.", }, @@ -66,7 +66,6 @@ func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *f func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { lease, err := b.Lease(ctx, req.Storage) - if err != nil { return nil, err } diff --git a/builtin/logical/mysql/path_role_create.go b/builtin/logical/mysql/path_role_create.go index 33a41798ba..ac237423bc 100644 --- a/builtin/logical/mysql/path_role_create.go +++ b/builtin/logical/mysql/path_role_create.go @@ -17,7 +17,7 @@ func pathRoleCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: 
framework.TypeString, Description: "Name of the role.", }, diff --git a/builtin/logical/mysql/secret_creds.go b/builtin/logical/mysql/secret_creds.go index 7d7f69b7a2..8a1043db90 100644 --- a/builtin/logical/mysql/secret_creds.go +++ b/builtin/logical/mysql/secret_creds.go @@ -26,12 +26,12 @@ func secretCreds(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretCredsType, Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password", }, diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 170531518a..d6f3c01c63 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -103,7 +103,6 @@ func prepareTestContainer(t *testing.T) (func(), *Config) { Token: nomadToken, }, nil }) - if err != nil { t.Fatalf("Could not start docker Nomad: %s", err) } diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 3044394d4c..6e55a4aa5f 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -14,31 +14,31 @@ func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", Fields: map[string]*framework.FieldSchema{ - "address": &framework.FieldSchema{ + "address": { Type: framework.TypeString, Description: "Nomad server address", }, - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token for API calls", }, - "max_token_name_length": &framework.FieldSchema{ + "max_token_name_length": { Type: framework.TypeInt, Description: "Max length for name of generated Nomad tokens", }, - "ca_cert": &framework.FieldSchema{ + "ca_cert": { Type: framework.TypeString, Description: `CA certificate to use when verifying Nomad server certificate, must be x509 PEM encoded.`, }, - "client_cert": &framework.FieldSchema{ + "client_cert": { Type: framework.TypeString, Description: `Client certificate used for Nomad's TLS communication, must be x509 PEM encoded and if this is set you need to also set client_key.`, }, - "client_key": &framework.FieldSchema{ + "client_key": { Type: framework.TypeString, Description: `Client key used for Nomad's TLS communication, must be x509 PEM encoded and if this is set you need to also set client_cert.`, diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go index 314644f1d3..676e515cb8 100644 --- a/builtin/logical/nomad/path_config_lease.go +++ b/builtin/logical/nomad/path_config_lease.go @@ -14,11 +14,11 @@ func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", Fields: map[string]*framework.FieldSchema{ - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: "Duration before which the issued token needs renewal", }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Description: `Duration after which the issued token should not be allowed to be renewed`, }, diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go index c40c8ed767..1f4553a081 100644 --- a/builtin/logical/nomad/path_creds_create.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -19,7 +19,7 @@ func pathCredsCreate(b *backend) *framework.Path { return 
&framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role", }, diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 5905e8d04f..7d657f373f 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -23,22 +23,22 @@ func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role", }, - "policies": &framework.FieldSchema{ + "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated string or list of policies as previously created in Nomad. Required for 'client' token.", }, - "global": &framework.FieldSchema{ + "global": { Type: framework.TypeBool, Description: "Boolean value describing if the token should be global or not. Defaults to false.", }, - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Default: "client", Description: `Which type of token to create: 'client' diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 215565a07d..fd446f7a64 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -17,7 +17,7 @@ func secretToken(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretTokenType, Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Request token", }, diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go index 5a9c480dd2..3c18493a8d 100644 --- a/builtin/logical/pki/backend_test.go +++ b/builtin/logical/pki/backend_test.go @@ -226,7 +226,7 @@ func TestBackend_Roles(t *testing.T) { testCase := logicaltest.TestCase{ LogicalBackend: b, Steps: []logicaltest.TestStep{ - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "config/ca", Data: map[string]interface{}{ @@ -396,7 +396,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }))) ret := []logicaltest.TestStep{ - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "root/generate/exported", Data: map[string]interface{}{ @@ -411,7 +411,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }, }, - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "config/urls", Data: map[string]interface{}{ @@ -421,7 +421,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }, }, - logicaltest.TestStep{ + { Operation: logical.ReadOperation, Path: "config/urls", Check: func(resp *logical.Response) error { @@ -442,7 +442,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }, }, - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "root/sign-intermediate", Data: map[string]interface{}{ @@ -463,7 +463,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }, }, - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "root/sign-intermediate", Data: map[string]interface{}{ @@ -505,7 +505,7 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }, // Same as above but exclude adding to sans - 
logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "root/sign-intermediate", Data: map[string]interface{}{ @@ -582,7 +582,7 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }))) ret := []logicaltest.TestStep{ - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "root/generate/exported", Data: map[string]interface{}{ @@ -592,7 +592,7 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }, }, - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "root/sign-intermediate", Data: map[string]interface{}{ @@ -603,12 +603,12 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s ErrorOk: true, }, - logicaltest.TestStep{ + { Operation: logical.DeleteOperation, Path: "root", }, - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "root/generate/exported", Data: map[string]interface{}{ @@ -618,7 +618,7 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s }, }, - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "root/sign-intermediate", Data: map[string]interface{}{ @@ -732,9 +732,9 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { // Adds tests with the currently configured issue/role information addTests := func(testCheck logicaltest.TestCheckFunc) { stepCount++ - //t.Logf("test step %d\nrole vals: %#v\n", stepCount, roleVals) + // t.Logf("test step %d\nrole vals: %#v\n", stepCount, roleVals) stepCount++ - //t.Logf("test step %d\nissue vals: %#v\n", stepCount, issueTestStep) + // t.Logf("test step %d\nissue vals: %#v\n", stepCount, issueTestStep) roleTestStep.Data = roleVals.ToResponseData() roleTestStep.Data["generate_lease"] = false ret = append(ret, roleTestStep) @@ -1027,7 +1027,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { getRandCsr := func(keyType string, errorOk bool, csrTemplate *x509.CertificateRequest) csrPlan { rsaKeyBits := []int{2048, 4096} ecKeyBits := []int{224, 256, 384, 521} - var plan = csrPlan{errorOk: errorOk} + plan := csrPlan{errorOk: errorOk} var testBitSize int switch keyType { @@ -1197,9 +1197,11 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - funcs := []interface{}{addCnTests, getCnCheck, getCountryCheck, getLocalityCheck, getNotBeforeCheck, + funcs := []interface{}{ + addCnTests, getCnCheck, getCountryCheck, getLocalityCheck, getNotBeforeCheck, getOrganizationCheck, getOuCheck, getPostalCodeCheck, getRandCsr, getStreetAddressCheck, - getProvinceCheck} + getProvinceCheck, + } if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { t.Logf("funcs=%d", len(funcs)) } @@ -2440,8 +2442,10 @@ func TestBackend_OID_SANs(t *testing.T) { // Valid for both oid1, type1, val1 := "1.3.6.1.4.1.311.20.2.3", "utf8", "devops@nope.com" oid2, type2, val2 := "1.3.6.1.4.1.311.20.2.4", "utf-8", "d234e@foobar.com" - otherNames := []string{fmt.Sprintf("%s;%s:%s", oid1, type1, val1), - fmt.Sprintf("%s;%s:%s", oid2, type2, val2)} + otherNames := []string{ + fmt.Sprintf("%s;%s:%s", oid1, type1, val1), + fmt.Sprintf("%s;%s:%s", oid2, type2, val2), + } resp, err = client.Logical().Write("root/issue/test", map[string]interface{}{ "common_name": "foobar.com", "ip_sans": "1.2.3.4", @@ -2797,8 +2801,10 @@ func TestBackend_AllowedDomainsTemplate(t *testing.T) { // Write role PKI. 
_, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ - "allowed_domains": []string{"foobar.com", "zipzap.com", "{{identity.entity.aliases." + userpassAccessor + ".name}}", - "foo.{{identity.entity.aliases." + userpassAccessor + ".name}}.example.com"}, + "allowed_domains": []string{ + "foobar.com", "zipzap.com", "{{identity.entity.aliases." + userpassAccessor + ".name}}", + "foo.{{identity.entity.aliases." + userpassAccessor + ".name}}.example.com", + }, "allowed_domains_template": true, "allow_bare_domains": true, }) @@ -3026,7 +3032,6 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { if secret != nil { t.Fatalf("expected empty response data, got: %#v", secret.Data) } - } var ( diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go index ce51f7d10d..71de530194 100644 --- a/builtin/logical/pki/cert_util.go +++ b/builtin/logical/pki/cert_util.go @@ -932,8 +932,9 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn if csr != nil && data.role.UseCSRSANs { if len(csr.URIs) > 0 { if len(data.role.AllowedURISANs) == 0 { - return nil, errutil.UserError{Err: fmt.Sprintf( - "URI Subject Alternative Names are not allowed in this role, but were provided via CSR"), + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "URI Subject Alternative Names are not allowed in this role, but were provided via CSR"), } } @@ -949,8 +950,9 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn } if !valid { - return nil, errutil.UserError{Err: fmt.Sprintf( - "URI Subject Alternative Names were provided via CSR which are not valid for this role"), + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "URI Subject Alternative Names were provided via CSR which are not valid for this role"), } } @@ -961,8 +963,9 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn uriAlt := data.apiData.Get("uri_sans").([]string) if len(uriAlt) > 0 { if len(data.role.AllowedURISANs) == 0 { - return nil, errutil.UserError{Err: fmt.Sprintf( - "URI Subject Alternative Names are not allowed in this role, but were provided via the API"), + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "URI Subject Alternative Names are not allowed in this role, but were provided via the API"), } } @@ -977,15 +980,17 @@ func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAIn } if !valid { - return nil, errutil.UserError{Err: fmt.Sprintf( - "URI Subject Alternative Names were provided via the API which are not valid for this role"), + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "URI Subject Alternative Names were provided via the API which are not valid for this role"), } } parsedURI, err := url.Parse(uri) if parsedURI == nil || err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf( - "the provided URI Subject Alternative Name '%s' is not a valid URI", uri), + return nil, errutil.UserError{ + Err: fmt.Sprintf( + "the provided URI Subject Alternative Name '%s' is not a valid URI", uri), } } diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go index 102fb9487d..b1f815d1a1 100644 --- a/builtin/logical/pki/cert_util_test.go +++ b/builtin/logical/pki/cert_util_test.go @@ -4,9 +4,8 @@ import ( "context" "fmt" "reflect" - "testing" - "strings" + "testing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" @@ -216,5 +215,4 @@ func TestPki_PermitFQDNs(t *testing.T) { t.Fatalf("Expected %v, got %v", 
testCase.expected, actual) } } - } diff --git a/builtin/logical/pki/crl_test.go b/builtin/logical/pki/crl_test.go index e09f7fd4cb..1aa56ba3ec 100644 --- a/builtin/logical/pki/crl_test.go +++ b/builtin/logical/pki/crl_test.go @@ -51,7 +51,7 @@ func TestBackend_CRL_EnableDisable(t *testing.T) { t.Fatal(err) } - var serials = make(map[int]string) + serials := make(map[int]string) for i := 0; i < 6; i++ { resp, err := client.Logical().Write("pki/issue/test", map[string]interface{}{ "common_name": "test.foobar.com", diff --git a/builtin/logical/pki/path_config_ca.go b/builtin/logical/pki/path_config_ca.go index 393f2d0b05..ea192a1343 100644 --- a/builtin/logical/pki/path_config_ca.go +++ b/builtin/logical/pki/path_config_ca.go @@ -14,7 +14,7 @@ func pathConfigCA(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/ca", Fields: map[string]*framework.FieldSchema{ - "pem_bundle": &framework.FieldSchema{ + "pem_bundle": { Type: framework.TypeString, Description: `PEM-format, concatenated unencrypted secret key and certificate.`, diff --git a/builtin/logical/pki/path_config_crl.go b/builtin/logical/pki/path_config_crl.go index 988e1d882d..0df276fddc 100644 --- a/builtin/logical/pki/path_config_crl.go +++ b/builtin/logical/pki/path_config_crl.go @@ -21,13 +21,13 @@ func pathConfigCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/crl", Fields: map[string]*framework.FieldSchema{ - "expiry": &framework.FieldSchema{ + "expiry": { Type: framework.TypeString, Description: `The amount of time the generated CRL should be valid; defaults to 72 hours`, Default: "72h", }, - "disable": &framework.FieldSchema{ + "disable": { Type: framework.TypeBool, Description: `If set to true, disables generating the CRL entirely.`, }, diff --git a/builtin/logical/pki/path_config_urls.go b/builtin/logical/pki/path_config_urls.go index 1644f605e1..719dc77d94 100644 --- a/builtin/logical/pki/path_config_urls.go +++ b/builtin/logical/pki/path_config_urls.go @@ -15,19 +15,19 @@ func pathConfigURLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/urls", Fields: map[string]*framework.FieldSchema{ - "issuing_certificates": &framework.FieldSchema{ + "issuing_certificates": { Type: framework.TypeCommaStringSlice, Description: `Comma-separated list of URLs to be used for the issuing certificate attribute`, }, - "crl_distribution_points": &framework.FieldSchema{ + "crl_distribution_points": { Type: framework.TypeCommaStringSlice, Description: `Comma-separated list of URLs to be used for the CRL distribution points attribute`, }, - "ocsp_servers": &framework.FieldSchema{ + "ocsp_servers": { Type: framework.TypeCommaStringSlice, Description: `Comma-separated list of URLs to be used for the OCSP servers attribute`, diff --git a/builtin/logical/pki/path_fetch.go b/builtin/logical/pki/path_fetch.go index 03b1b04267..8765f86a4f 100644 --- a/builtin/logical/pki/path_fetch.go +++ b/builtin/logical/pki/path_fetch.go @@ -59,7 +59,7 @@ func pathFetchValid(b *backend) *framework.Path { return &framework.Path{ Pattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)`, Fields: map[string]*framework.FieldSchema{ - "serial": &framework.FieldSchema{ + "serial": { Type: framework.TypeString, Description: `Certificate serial number, in colon- or hyphen-separated octal`, @@ -236,7 +236,8 @@ reply: Data: map[string]interface{}{ logical.HTTPContentType: contentType, logical.HTTPRawBody: certificate, - }} + }, + } if retErr != nil { if b.Logger().IsWarn() { b.Logger().Warn("possible error, but cannot return in raw
response. Note that an empty CA probably means none was configured, and an empty CRL is possibly correct", "error", retErr) diff --git a/builtin/logical/pki/path_intermediate.go b/builtin/logical/pki/path_intermediate.go index 54492072e6..efd321fd40 100644 --- a/builtin/logical/pki/path_intermediate.go +++ b/builtin/logical/pki/path_intermediate.go @@ -42,7 +42,7 @@ func pathSetSignedIntermediate(b *backend) *framework.Path { Pattern: "intermediate/set-signed", Fields: map[string]*framework.FieldSchema{ - "certificate": &framework.FieldSchema{ + "certificate": { Type: framework.TypeString, Description: `PEM-format certificate. This must be a CA certificate with a public key matching the diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go index 76a9f86b81..ff093b7bfc 100644 --- a/builtin/logical/pki/path_issue_sign.go +++ b/builtin/logical/pki/path_issue_sign.go @@ -145,7 +145,6 @@ func (b *backend) pathSign(ctx context.Context, req *logical.Request, data *fram // pathSignVerbatim issues a certificate from a submitted CSR, *not* subject to // role restrictions func (b *backend) pathSignVerbatim(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - roleName := data.Get("role").(string) // Get the role if one was specified diff --git a/builtin/logical/pki/path_revoke.go b/builtin/logical/pki/path_revoke.go index 1906e24bb2..4ab9f8367f 100644 --- a/builtin/logical/pki/path_revoke.go +++ b/builtin/logical/pki/path_revoke.go @@ -16,7 +16,7 @@ func pathRevoke(b *backend) *framework.Path { return &framework.Path{ Pattern: `revoke`, Fields: map[string]*framework.FieldSchema{ - "serial_number": &framework.FieldSchema{ + "serial_number": { Type: framework.TypeString, Description: `Certificate serial number, in colon- or hyphen-separated octal`, diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go index 4d23484079..4c61587d24 100644 --- a/builtin/logical/pki/path_roles.go +++ b/builtin/logical/pki/path_roles.go @@ -32,17 +32,17 @@ func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "backend": &framework.FieldSchema{ + "backend": { Type: framework.TypeString, Description: "Backend Type", }, - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role", }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: `The lease duration if no specific lease duration is requested. The lease duration controls the expiration @@ -53,7 +53,7 @@ the value of max_ttl.`, }, }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Description: "The maximum allowed lease duration", DisplayAttrs: &framework.DisplayAttributes{ @@ -61,7 +61,7 @@ the value of max_ttl.`, }, }, - "allow_localhost": &framework.FieldSchema{ + "allow_localhost": { Type: framework.TypeBool, Default: true, Description: `Whether to allow "localhost" as a valid common @@ -71,7 +71,7 @@ name in a request`, }, }, - "allowed_domains": &framework.FieldSchema{ + "allowed_domains": { Type: framework.TypeCommaStringSlice, Description: `If set, clients can request certificates for subdomains directly beneath these domains, including @@ -79,13 +79,13 @@ the wildcard subdomains. See the documentation for more information. 
This parameter accepts a comma-separated string or list of domains.`, }, - "allowed_domains_template": &framework.FieldSchema{ + "allowed_domains_template": { Type: framework.TypeBool, Description: `If set, Allowed domains can be specified using identity template policies. Non-templated domains are also permitted.`, Default: false, }, - "allow_bare_domains": &framework.FieldSchema{ + "allow_bare_domains": { Type: framework.TypeBool, Description: `If set, clients can request certificates for the base domains themselves, e.g. "example.com". @@ -93,7 +93,7 @@ This is a separate option as in some cases this can be considered a security threat.`, }, - "allow_subdomains": &framework.FieldSchema{ + "allow_subdomains": { Type: framework.TypeBool, Description: `If set, clients can request certificates for subdomains of the CNs allowed by the other role options, @@ -101,21 +101,21 @@ including wildcard subdomains. See the documentation for more information.`, }, - "allow_glob_domains": &framework.FieldSchema{ + "allow_glob_domains": { Type: framework.TypeBool, Description: `If set, domains specified in "allowed_domains" can include glob patterns, e.g. "ftp*.example.com". See the documentation for more information.`, }, - "allow_any_name": &framework.FieldSchema{ + "allow_any_name": { Type: framework.TypeBool, Description: `If set, clients can request certificates for any CN they like. See the documentation for more information.`, }, - "enforce_hostnames": &framework.FieldSchema{ + "enforce_hostnames": { Type: framework.TypeBool, Default: true, Description: `If set, only valid host names are allowed for @@ -125,7 +125,7 @@ CN and SANs. Defaults to true.`, }, }, - "allow_ip_sans": &framework.FieldSchema{ + "allow_ip_sans": { Type: framework.TypeBool, Default: true, Description: `If set, IP Subject Alternative Names are allowed. @@ -136,7 +136,7 @@ Any valid IP is accepted.`, }, }, - "allowed_uri_sans": &framework.FieldSchema{ + "allowed_uri_sans": { Type: framework.TypeCommaStringSlice, Description: `If set, an array of allowed URIs to put in the URI Subject Alternative Names. Any valid URI is accepted, these values support globbing.`, @@ -145,7 +145,7 @@ Any valid URI is accepted, these values support globbing.`, }, }, - "allowed_other_sans": &framework.FieldSchema{ + "allowed_other_sans": { Type: framework.TypeCommaStringSlice, Description: `If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format <oid>;<type>:<value>. Currently only "utf8" is a valid type. All values, including globbing values, must use this syntax, with the exception being a single "*" which allows any OID and any value (but type must still be utf8).`, DisplayAttrs: &framework.DisplayAttributes{ @@ -153,12 +153,12 @@ Any valid URI is accepted, these values support globbing.`, }, }, - "allowed_serial_numbers": &framework.FieldSchema{ + "allowed_serial_numbers": { Type: framework.TypeCommaStringSlice, Description: `If set, an array of allowed serial numbers to put in Subject. These values support globbing.`, }, - "server_flag": &framework.FieldSchema{ + "server_flag": { Type: framework.TypeBool, Default: true, Description: `If set, certificates are flagged for server auth use. @@ -168,7 +168,7 @@ Defaults to true.`, }, }, - "client_flag": &framework.FieldSchema{ + "client_flag": { Type: framework.TypeBool, Default: true, Description: `If set, certificates are flagged for client auth use.
@@ -178,19 +178,19 @@ Defaults to true.`, }, }, - "code_signing_flag": &framework.FieldSchema{ + "code_signing_flag": { Type: framework.TypeBool, Description: `If set, certificates are flagged for code signing use. Defaults to false.`, }, - "email_protection_flag": &framework.FieldSchema{ + "email_protection_flag": { Type: framework.TypeBool, Description: `If set, certificates are flagged for email protection use. Defaults to false.`, }, - "key_type": &framework.FieldSchema{ + "key_type": { Type: framework.TypeString, Default: "rsa", Description: `The type of key to use; defaults to RSA. "rsa" @@ -198,7 +198,7 @@ and "ec" are the only valid values.`, AllowedValues: []interface{}{"rsa", "ec"}, }, - "key_bits": &framework.FieldSchema{ + "key_bits": { Type: framework.TypeInt, Default: 2048, Description: `The number of bits to use. You will almost @@ -206,7 +206,7 @@ certainly want to change this if you adjust the key_type.`, }, - "key_usage": &framework.FieldSchema{ + "key_usage": { Type: framework.TypeCommaStringSlice, Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, Description: `A comma-separated string or list of key usages (not extended @@ -220,7 +220,7 @@ this value to an empty list.`, }, }, - "ext_key_usage": &framework.FieldSchema{ + "ext_key_usage": { Type: framework.TypeCommaStringSlice, Default: []string{}, Description: `A comma-separated string or list of extended key usages. Valid values can be found at @@ -233,7 +233,7 @@ this value to an empty list.`, }, }, - "ext_key_usage_oids": &framework.FieldSchema{ + "ext_key_usage_oids": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated string or list of extended key usage oids.`, DisplayAttrs: &framework.DisplayAttributes{ @@ -241,7 +241,7 @@ this value to an empty list.`, }, }, - "use_csr_common_name": &framework.FieldSchema{ + "use_csr_common_name": { Type: framework.TypeBool, Default: true, Description: `If set, when used with a signing profile, @@ -254,7 +254,7 @@ Names. Defaults to true.`, }, }, - "use_csr_sans": &framework.FieldSchema{ + "use_csr_sans": { Type: framework.TypeBool, Default: true, Description: `If set, when used with a signing profile, @@ -266,7 +266,7 @@ include the Common Name (cn). 
Defaults to true.`, }, }, - "ou": &framework.FieldSchema{ + "ou": { Type: framework.TypeCommaStringSlice, Description: `If set, OU (OrganizationalUnit) will be set to this value in certificates issued by this role.`, @@ -275,19 +275,19 @@ this value in certificates issued by this role.`, }, }, - "organization": &framework.FieldSchema{ + "organization": { Type: framework.TypeCommaStringSlice, Description: `If set, O (Organization) will be set to this value in certificates issued by this role.`, }, - "country": &framework.FieldSchema{ + "country": { Type: framework.TypeCommaStringSlice, Description: `If set, Country will be set to this value in certificates issued by this role.`, }, - "locality": &framework.FieldSchema{ + "locality": { Type: framework.TypeCommaStringSlice, Description: `If set, Locality will be set to this value in certificates issued by this role.`, @@ -296,7 +296,7 @@ this value in certificates issued by this role.`, }, }, - "province": &framework.FieldSchema{ + "province": { Type: framework.TypeCommaStringSlice, Description: `If set, Province will be set to this value in certificates issued by this role.`, @@ -305,19 +305,19 @@ this value in certificates issued by this role.`, }, }, - "street_address": &framework.FieldSchema{ + "street_address": { Type: framework.TypeCommaStringSlice, Description: `If set, Street Address will be set to this value in certificates issued by this role.`, }, - "postal_code": &framework.FieldSchema{ + "postal_code": { Type: framework.TypeCommaStringSlice, Description: `If set, Postal Code will be set to this value in certificates issued by this role.`, }, - "generate_lease": &framework.FieldSchema{ + "generate_lease": { Type: framework.TypeBool, Description: ` If set, certificates issued/signed against this role will have Vault leases @@ -330,7 +330,7 @@ lifetimes, it is recommended that lease generation be disabled, as large amount leases adversely affect the startup time of Vault.`, }, - "no_store": &framework.FieldSchema{ + "no_store": { Type: framework.TypeBool, Description: ` If set, certificates issued/signed against this role will not be stored in the @@ -341,7 +341,7 @@ non-sensitive, or extremely short-lived. 
This option implies a value of "false" for "generate_lease".`, }, - "require_cn": &framework.FieldSchema{ + "require_cn": { Type: framework.TypeBool, Default: true, Description: `If set to false, makes the 'common_name' field optional while generating a certificate.`, @@ -350,19 +350,19 @@ for "generate_lease".`, }, }, - "policy_identifiers": &framework.FieldSchema{ + "policy_identifiers": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated string or list of policy oids.`, }, - "basic_constraints_valid_for_non_ca": &framework.FieldSchema{ + "basic_constraints_valid_for_non_ca": { Type: framework.TypeBool, Description: `Mark Basic Constraints valid when issuing non-CA certificates.`, DisplayAttrs: &framework.DisplayAttributes{ Name: "Basic Constraints Valid for Non-CA", }, }, - "not_before_duration": &framework.FieldSchema{ + "not_before_duration": { Type: framework.TypeDurationSecond, Default: 30, Description: `The duration before now the cert needs to be created / signed.`, diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go index ec94909a0a..5933a740b2 100644 --- a/builtin/logical/pki/path_root.go +++ b/builtin/logical/pki/path_root.go @@ -100,7 +100,7 @@ func pathSignSelfIssued(b *backend) *framework.Path { }, Fields: map[string]*framework.FieldSchema{ - "certificate": &framework.FieldSchema{ + "certificate": { Type: framework.TypeString, Description: `PEM-format self-issued certificate to be signed.`, }, diff --git a/builtin/logical/pki/path_tidy.go b/builtin/logical/pki/path_tidy.go index 2a03984402..5fd8016c0c 100644 --- a/builtin/logical/pki/path_tidy.go +++ b/builtin/logical/pki/path_tidy.go @@ -18,31 +18,31 @@ func pathTidy(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy", Fields: map[string]*framework.FieldSchema{ - "tidy_cert_store": &framework.FieldSchema{ + "tidy_cert_store": { Type: framework.TypeBool, Description: `Set to true to enable tidying up the certificate store`, }, - "tidy_revocation_list": &framework.FieldSchema{ + "tidy_revocation_list": { Type: framework.TypeBool, Description: `Deprecated; synonym for 'tidy_revoked_certs`, }, - "tidy_revoked_certs": &framework.FieldSchema{ + "tidy_revoked_certs": { Type: framework.TypeBool, Description: `Set to true to expire all revoked and expired certificates, removing them both from the CRL and from storage. The CRL will be rotated if this causes any values to be removed.`, }, - "safety_buffer": &framework.FieldSchema{ + "safety_buffer": { Type: framework.TypeDurationSecond, Description: `The amount of extra time that must have passed beyond certificate expiration before it is removed from the backend storage and/or revocation list. 
Defaults to 72 hours.`, - Default: 259200, //72h, but TypeDurationSecond currently requires defaults to be int + Default: 259200, // 72h, but TypeDurationSecond currently requires defaults to be int }, }, diff --git a/builtin/logical/pki/secret_certs.go b/builtin/logical/pki/secret_certs.go index 3244004e39..bdbcd01ba1 100644 --- a/builtin/logical/pki/secret_certs.go +++ b/builtin/logical/pki/secret_certs.go @@ -15,16 +15,16 @@ func secretCerts(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretCertsType, Fields: map[string]*framework.FieldSchema{ - "certificate": &framework.FieldSchema{ + "certificate": { Type: framework.TypeString, Description: `The PEM-encoded concatenated certificate and issuing certificate authority`, }, - "private_key": &framework.FieldSchema{ + "private_key": { Type: framework.TypeString, Description: "The PEM-encoded private key for the certificate", }, - "serial": &framework.FieldSchema{ + "serial": { Type: framework.TypeString, Description: `The serial number of the certificate, for handy reference`, diff --git a/builtin/logical/postgresql/backend_test.go b/builtin/logical/postgresql/backend_test.go index 739591e93e..090fd46f9c 100644 --- a/builtin/logical/postgresql/backend_test.go +++ b/builtin/logical/postgresql/backend_test.go @@ -273,7 +273,6 @@ func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, na } log.Printf("[TRACE] Generated credentials: %v", d) conn, err := pq.ParseURL(connURL) - if err != nil { t.Fatal(err) } @@ -358,7 +357,6 @@ func testAccStepCreateTable(t *testing.T, b logical.Backend, s logical.Storage, } log.Printf("[TRACE] Generated credentials: %v", d) conn, err := pq.ParseURL(connURL) - if err != nil { t.Fatal(err) } @@ -413,7 +411,6 @@ func testAccStepDropTable(t *testing.T, b logical.Backend, s logical.Storage, na } log.Printf("[TRACE] Generated credentials: %v", d) conn, err := pq.ParseURL(connURL) - if err != nil { t.Fatal(err) } diff --git a/builtin/logical/postgresql/path_config_connection.go b/builtin/logical/postgresql/path_config_connection.go index e1db1abed3..d3e19349b3 100644 --- a/builtin/logical/postgresql/path_config_connection.go +++ b/builtin/logical/postgresql/path_config_connection.go @@ -14,31 +14,31 @@ func pathConfigConnection(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/connection", Fields: map[string]*framework.FieldSchema{ - "connection_url": &framework.FieldSchema{ + "connection_url": { Type: framework.TypeString, Description: "DB connection string", }, - "value": &framework.FieldSchema{ + "value": { Type: framework.TypeString, Description: `DB connection string. Use 'connection_url' instead. 
This will be deprecated.`, }, - "verify_connection": &framework.FieldSchema{ + "verify_connection": { Type: framework.TypeBool, Default: true, Description: `If set, connection_url is verified by actually connecting to the database`, }, - "max_open_connections": &framework.FieldSchema{ + "max_open_connections": { Type: framework.TypeInt, Description: `Maximum number of open connections to the database; a zero uses the default value of two and a negative value means unlimited`, }, - "max_idle_connections": &framework.FieldSchema{ + "max_idle_connections": { Type: framework.TypeInt, Description: `Maximum number of idle connections to the database; a zero uses the value of max_open_connections diff --git a/builtin/logical/postgresql/path_config_lease.go b/builtin/logical/postgresql/path_config_lease.go index 0307d6ee30..1a86059260 100644 --- a/builtin/logical/postgresql/path_config_lease.go +++ b/builtin/logical/postgresql/path_config_lease.go @@ -13,12 +13,12 @@ func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", Fields: map[string]*framework.FieldSchema{ - "lease": &framework.FieldSchema{ + "lease": { Type: framework.TypeString, Description: "Default lease for roles.", }, - "lease_max": &framework.FieldSchema{ + "lease_max": { Type: framework.TypeString, Description: "Maximum time a credential is valid for.", }, @@ -66,7 +66,6 @@ func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *f func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { lease, err := b.Lease(ctx, req.Storage) - if err != nil { return nil, err } diff --git a/builtin/logical/postgresql/path_role_create.go b/builtin/logical/postgresql/path_role_create.go index 99f553a1c3..168456a5f3 100644 --- a/builtin/logical/postgresql/path_role_create.go +++ b/builtin/logical/postgresql/path_role_create.go @@ -18,7 +18,7 @@ func pathRoleCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role.", }, diff --git a/builtin/logical/postgresql/secret_creds.go b/builtin/logical/postgresql/secret_creds.go index 4a72d7a1a6..a75ffe10f8 100644 --- a/builtin/logical/postgresql/secret_creds.go +++ b/builtin/logical/postgresql/secret_creds.go @@ -21,12 +21,12 @@ func secretCreds(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretCredsType, Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password", }, diff --git a/builtin/logical/rabbitmq/backend_test.go b/builtin/logical/rabbitmq/backend_test.go index 35da7d7d59..8b5b21b0a9 100644 --- a/builtin/logical/rabbitmq/backend_test.go +++ b/builtin/logical/rabbitmq/backend_test.go @@ -80,7 +80,6 @@ func TestBackend_basic(t *testing.T) { testAccStepReadCreds(t, b, uri, roleName), }, }) - } func TestBackend_returnsErrs(t *testing.T) { diff --git a/builtin/logical/rabbitmq/path_config_connection.go b/builtin/logical/rabbitmq/path_config_connection.go index cd41b73c8a..b3564ed731 100644 --- a/builtin/logical/rabbitmq/path_config_connection.go +++ b/builtin/logical/rabbitmq/path_config_connection.go @@ -17,24 +17,24 @@ func pathConfigConnection(b 
*backend) *framework.Path { return &framework.Path{ Pattern: "config/connection", Fields: map[string]*framework.FieldSchema{ - "connection_uri": &framework.FieldSchema{ + "connection_uri": { Type: framework.TypeString, Description: "RabbitMQ Management URI", }, - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username of a RabbitMQ management administrator", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password of the provided RabbitMQ management user", }, - "verify_connection": &framework.FieldSchema{ + "verify_connection": { Type: framework.TypeBool, Default: true, Description: `If set, connection_uri is verified by actually connecting to the RabbitMQ management API`, }, - "password_policy": &framework.FieldSchema{ + "password_policy": { Type: framework.TypeString, Description: "Name of the password policy to use to generate passwords for dynamic credentials.", }, diff --git a/builtin/logical/rabbitmq/path_config_lease.go b/builtin/logical/rabbitmq/path_config_lease.go index 740ec60937..0b6bb57218 100644 --- a/builtin/logical/rabbitmq/path_config_lease.go +++ b/builtin/logical/rabbitmq/path_config_lease.go @@ -13,12 +13,12 @@ func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", Fields: map[string]*framework.FieldSchema{ - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Default: 0, Description: "Duration before which the issued credentials needs renewal", }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Default: 0, Description: `Duration after which the issued credentials should not be allowed to be renewed`, diff --git a/builtin/logical/rabbitmq/path_role_create.go b/builtin/logical/rabbitmq/path_role_create.go index 77f30d2dc6..10f19f081c 100644 --- a/builtin/logical/rabbitmq/path_role_create.go +++ b/builtin/logical/rabbitmq/path_role_create.go @@ -15,7 +15,7 @@ func pathCreds(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role.", }, diff --git a/builtin/logical/rabbitmq/path_roles.go b/builtin/logical/rabbitmq/path_roles.go index 6f16eba494..2031c7d99e 100644 --- a/builtin/logical/rabbitmq/path_roles.go +++ b/builtin/logical/rabbitmq/path_roles.go @@ -25,19 +25,19 @@ func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the role.", }, - "tags": &framework.FieldSchema{ + "tags": { Type: framework.TypeString, Description: "Comma-separated list of tags for this role.", }, - "vhosts": &framework.FieldSchema{ + "vhosts": { Type: framework.TypeString, Description: "A map of virtual hosts to permissions.", }, - "vhost_topics": &framework.FieldSchema{ + "vhost_topics": { Type: framework.TypeString, Description: "A nested map of virtual hosts and exchanges to topic permissions.", }, diff --git a/builtin/logical/rabbitmq/secret_creds.go b/builtin/logical/rabbitmq/secret_creds.go index ebe3a81497..9f047604b7 100644 --- a/builtin/logical/rabbitmq/secret_creds.go +++ b/builtin/logical/rabbitmq/secret_creds.go @@ -16,11 +16,11 @@ func secretCreds(b *backend) 
*framework.Secret { return &framework.Secret{ Type: SecretCredsType, Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "RabbitMQ username", }, - "password": &framework.FieldSchema{ + "password": { Type: framework.TypeString, Description: "Password for the RabbitMQ username", }, diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go index 36b0123f3a..3e25c646cd 100644 --- a/builtin/logical/ssh/backend_test.go +++ b/builtin/logical/ssh/backend_test.go @@ -3,21 +3,21 @@ package ssh import ( "bytes" "context" - "fmt" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/logical" - "net" - "reflect" - "strconv" - "testing" - "time" - - "golang.org/x/crypto/ssh" - "encoding/base64" "encoding/json" "errors" + "fmt" + "net" + "reflect" + "strconv" "strings" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/logical" + + "golang.org/x/crypto/ssh" "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" @@ -818,7 +818,7 @@ cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== Steps: []logicaltest.TestStep{ configCaStep(caPublicKey, caPrivateKey), testRoleWrite(t, "testcarole", roleOptions), - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "sign/testcarole", ErrorOk: expectError, @@ -946,7 +946,7 @@ cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== configCaStep(testCAPublicKey, testCAPrivateKey), testRoleWrite(t, "testcarole", roleOptionsOldEntry), testRoleWrite(t, "testcarole", roleOptionsUpgradedEntry), - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "sign/testcarole", ErrorOk: false, @@ -956,7 +956,6 @@ cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== }, Check: func(resp *logical.Response) error { - signedKey := strings.TrimSpace(resp.Data["signed_key"].(string)) if signedKey == "" { return errors.New("no signed key in response") @@ -991,7 +990,6 @@ cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== } func TestBackend_AbleToRetrievePublicKey(t *testing.T) { - config := logical.TestBackendConfig() b, err := Factory(context.Background(), config) @@ -1004,13 +1002,12 @@ func TestBackend_AbleToRetrievePublicKey(t *testing.T) { Steps: []logicaltest.TestStep{ configCaStep(testCAPublicKey, testCAPrivateKey), - logicaltest.TestStep{ + { Operation: logical.ReadOperation, Path: "public_key", Unauthenticated: true, Check: func(resp *logical.Response) error { - key := string(resp.Data["http_raw_body"].([]byte)) if key != testCAPublicKey { @@ -1027,7 +1024,6 @@ func TestBackend_AbleToRetrievePublicKey(t *testing.T) { } func TestBackend_AbleToAutoGenerateSigningKeys(t *testing.T) { - config := logical.TestBackendConfig() b, err := Factory(context.Background(), config) @@ -1039,7 +1035,7 @@ func TestBackend_AbleToAutoGenerateSigningKeys(t *testing.T) { testCase := logicaltest.TestCase{ LogicalBackend: b, Steps: []logicaltest.TestStep{ - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "config/ca", Check: func(resp *logical.Response) error { @@ -1051,13 +1047,12 @@ func TestBackend_AbleToAutoGenerateSigningKeys(t *testing.T) { }, }, - logicaltest.TestStep{ + { Operation: logical.ReadOperation, Path: "public_key", Unauthenticated: true, Check: func(resp *logical.Response) error { - key := string(resp.Data["http_raw_body"].([]byte)) if key == "" { @@ -1185,7 +1180,7 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { 
"rsa": json.Number(strconv.FormatInt(4096, 10)), }, }), - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "sign/weakkey", Data: map[string]interface{}{ @@ -1207,7 +1202,7 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { }, }), // Pass with 2048 key - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "sign/stdkey", Data: map[string]interface{}{ @@ -1215,7 +1210,7 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { }, }, // Fail with 4096 key - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "sign/stdkey", Data: map[string]interface{}{ @@ -1302,7 +1297,7 @@ func TestBackend_DisallowUserProvidedKeyIDs(t *testing.T) { "allow_user_key_ids": false, "allow_user_certificates": true, }), - logicaltest.TestStep{ + { Operation: logical.UpdateOperation, Path: "sign/testing", Data: map[string]interface{}{ @@ -1353,7 +1348,6 @@ func signCertificateStep( Data: requestParameters, Check: func(resp *logical.Response) error { - serialNumber := resp.Data["serial_number"].(string) if serialNumber == "" { return errors.New("no serial number in response") diff --git a/builtin/logical/ssh/communicator.go b/builtin/logical/ssh/communicator.go index 8bcd3ec40a..305617b33e 100644 --- a/builtin/logical/ssh/communicator.go +++ b/builtin/logical/ssh/communicator.go @@ -304,7 +304,7 @@ func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi * defer os.Remove(tf.Name()) defer tf.Close() - mode = 0644 + mode = 0o644 if _, err := io.Copy(tf, src); err != nil { return err diff --git a/builtin/logical/ssh/path_config_ca.go b/builtin/logical/ssh/path_config_ca.go index 09573a83db..c29af4ff6f 100644 --- a/builtin/logical/ssh/path_config_ca.go +++ b/builtin/logical/ssh/path_config_ca.go @@ -32,15 +32,15 @@ func pathConfigCA(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/ca", Fields: map[string]*framework.FieldSchema{ - "private_key": &framework.FieldSchema{ + "private_key": { Type: framework.TypeString, Description: `Private half of the SSH key that will be used to sign certificates.`, }, - "public_key": &framework.FieldSchema{ + "public_key": { Type: framework.TypeString, Description: `Public half of the SSH key that will be used to sign certificates.`, }, - "generate_signing_key": &framework.FieldSchema{ + "generate_signing_key": { Type: framework.TypeBool, Description: `Generate SSH key pair internally rather than use the private_key and public_key fields.`, Default: true, diff --git a/builtin/logical/ssh/path_config_zeroaddress.go b/builtin/logical/ssh/path_config_zeroaddress.go index 6987ab5e8c..31a0e852a7 100644 --- a/builtin/logical/ssh/path_config_zeroaddress.go +++ b/builtin/logical/ssh/path_config_zeroaddress.go @@ -19,7 +19,7 @@ func pathConfigZeroAddress(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/zeroaddress", Fields: map[string]*framework.FieldSchema{ - "roles": &framework.FieldSchema{ + "roles": { Type: framework.TypeCommaStringSlice, Description: `[Required] Comma separated list of role names which allows credentials to be requested for any IP address. 
CIDR blocks diff --git a/builtin/logical/ssh/path_creds_create.go b/builtin/logical/ssh/path_creds_create.go index f6fa7e7660..0cf6e309ed 100644 --- a/builtin/logical/ssh/path_creds_create.go +++ b/builtin/logical/ssh/path_creds_create.go @@ -22,15 +22,15 @@ func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameWithAtRegex("role"), Fields: map[string]*framework.FieldSchema{ - "role": &framework.FieldSchema{ + "role": { Type: framework.TypeString, Description: "[Required] Name of the role", }, - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "[Optional] Username in remote host", }, - "ip": &framework.FieldSchema{ + "ip": { Type: framework.TypeString, Description: "[Required] IP of the remote host", }, diff --git a/builtin/logical/ssh/path_keys.go b/builtin/logical/ssh/path_keys.go index b933076582..6f0f7c9b2b 100644 --- a/builtin/logical/ssh/path_keys.go +++ b/builtin/logical/ssh/path_keys.go @@ -18,11 +18,11 @@ func pathKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("key_name"), Fields: map[string]*framework.FieldSchema{ - "key_name": &framework.FieldSchema{ + "key_name": { Type: framework.TypeString, Description: "[Required] Name of the key", }, - "key": &framework.FieldSchema{ + "key": { Type: framework.TypeString, Description: "[Required] SSH private key with super user privileges in host", }, diff --git a/builtin/logical/ssh/path_lookup.go b/builtin/logical/ssh/path_lookup.go index dac6874847..05b62af96a 100644 --- a/builtin/logical/ssh/path_lookup.go +++ b/builtin/logical/ssh/path_lookup.go @@ -13,7 +13,7 @@ func pathLookup(b *backend) *framework.Path { return &framework.Path{ Pattern: "lookup", Fields: map[string]*framework.FieldSchema{ - "ip": &framework.FieldSchema{ + "ip": { Type: framework.TypeString, Description: "[Required] IP address of remote host", }, diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 708e46d516..131e97d562 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -73,20 +73,20 @@ func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameWithAtRegex("role"), Fields: map[string]*framework.FieldSchema{ - "role": &framework.FieldSchema{ + "role": { Type: framework.TypeString, Description: ` [Required for all types] Name of the role being created.`, }, - "key": &framework.FieldSchema{ + "key": { Type: framework.TypeString, Description: ` [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] Name of the registered key in Vault. 
Before creating the role, use the 'keys/' endpoint to create a named key.`, }, - "admin_user": &framework.FieldSchema{ + "admin_user": { Type: framework.TypeString, Description: ` [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] @@ -99,7 +99,7 @@ func pathRoles(b *backend) *framework.Path { Name: "Admin Username", }, }, - "default_user": &framework.FieldSchema{ + "default_user": { Type: framework.TypeString, Description: ` [Required for Dynamic type] [Required for OTP type] [Optional for CA type] @@ -110,7 +110,7 @@ func pathRoles(b *backend) *framework.Path { Name: "Default Username", }, }, - "cidr_list": &framework.FieldSchema{ + "cidr_list": { Type: framework.TypeString, Description: ` [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] @@ -120,7 +120,7 @@ func pathRoles(b *backend) *framework.Path { Name: "CIDR List", }, }, - "exclude_cidr_list": &framework.FieldSchema{ + "exclude_cidr_list": { Type: framework.TypeString, Description: ` [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] @@ -131,7 +131,7 @@ func pathRoles(b *backend) *framework.Path { Name: "Exclude CIDR List", }, }, - "port": &framework.FieldSchema{ + "port": { Type: framework.TypeInt, Description: ` [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] @@ -143,7 +143,7 @@ func pathRoles(b *backend) *framework.Path { Value: 22, }, }, - "key_type": &framework.FieldSchema{ + "key_type": { Type: framework.TypeString, Description: ` [Required for all types] @@ -154,13 +154,13 @@ func pathRoles(b *backend) *framework.Path { Value: "ca", }, }, - "key_bits": &framework.FieldSchema{ + "key_bits": { Type: framework.TypeInt, Description: ` [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] Length of the RSA dynamic key in bits. It is 1024 by default or it can be 2048.`, }, - "install_script": &framework.FieldSchema{ + "install_script": { Type: framework.TypeString, Description: ` [Optional for Dynamic type] [Not-applicable for OTP type] [Not applicable for CA type] @@ -168,7 +168,7 @@ func pathRoles(b *backend) *framework.Path { The inbuilt default install script will be for Linux hosts. For sample script, refer the project documentation website.`, }, - "allowed_users": &framework.FieldSchema{ + "allowed_users": { Type: framework.TypeString, Description: ` [Optional for all types] [Works differently for CA type] @@ -184,7 +184,7 @@ func pathRoles(b *backend) *framework.Path { allow any user. `, }, - "allowed_users_template": &framework.FieldSchema{ + "allowed_users_template": { Type: framework.TypeBool, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -193,7 +193,7 @@ func pathRoles(b *backend) *framework.Path { `, Default: false, }, - "allowed_domains": &framework.FieldSchema{ + "allowed_domains": { Type: framework.TypeString, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -201,7 +201,7 @@ func pathRoles(b *backend) *framework.Path { valid host. If only certain domains are allowed, then this list enforces it. `, }, - "key_option_specs": &framework.FieldSchema{ + "key_option_specs": { Type: framework.TypeString, Description: ` [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] @@ -210,7 +210,7 @@ func pathRoles(b *backend) *framework.Path { file format and should not contain spaces. 
`, }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -222,7 +222,7 @@ func pathRoles(b *backend) *framework.Path { Name: "TTL", }, }, - "max_ttl": &framework.FieldSchema{ + "max_ttl": { Type: framework.TypeDurationSecond, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -232,7 +232,7 @@ func pathRoles(b *backend) *framework.Path { Name: "Max TTL", }, }, - "allowed_critical_options": &framework.FieldSchema{ + "allowed_critical_options": { Type: framework.TypeString, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -240,7 +240,7 @@ func pathRoles(b *backend) *framework.Path { To allow any critical options, set this to an empty string. `, }, - "allowed_extensions": &framework.FieldSchema{ + "allowed_extensions": { Type: framework.TypeString, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -248,7 +248,7 @@ func pathRoles(b *backend) *framework.Path { To allow any extensions, set this to an empty string. `, }, - "default_critical_options": &framework.FieldSchema{ + "default_critical_options": { Type: framework.TypeMap, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] @@ -258,7 +258,7 @@ func pathRoles(b *backend) *framework.Path { by "allowed_critical_options". Defaults to none. `, }, - "default_extensions": &framework.FieldSchema{ + "default_extensions": { Type: framework.TypeMap, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] @@ -268,7 +268,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_extensions". Defaults to none. `, }, - "allow_user_certificates": &framework.FieldSchema{ + "allow_user_certificates": { Type: framework.TypeBool, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -276,7 +276,7 @@ func pathRoles(b *backend) *framework.Path { `, Default: false, }, - "allow_host_certificates": &framework.FieldSchema{ + "allow_host_certificates": { Type: framework.TypeBool, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -284,7 +284,7 @@ func pathRoles(b *backend) *framework.Path { `, Default: false, }, - "allow_bare_domains": &framework.FieldSchema{ + "allow_bare_domains": { Type: framework.TypeBool, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -293,14 +293,14 @@ func pathRoles(b *backend) *framework.Path { This is a separate option as in some cases this can be considered a security threat. `, }, - "allow_subdomains": &framework.FieldSchema{ + "allow_subdomains": { Type: framework.TypeBool, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, host certificates that are requested are allowed to use subdomains of those listed in "allowed_domains". 
`, }, - "allow_user_key_ids": &framework.FieldSchema{ + "allow_user_key_ids": { Type: framework.TypeBool, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -312,7 +312,7 @@ func pathRoles(b *backend) *framework.Path { Name: "Allow User Key IDs", }, }, - "key_id_format": &framework.FieldSchema{ + "key_id_format": { Type: framework.TypeString, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] @@ -325,14 +325,14 @@ func pathRoles(b *backend) *framework.Path { Name: "Key ID Format", }, }, - "allowed_user_key_lengths": &framework.FieldSchema{ + "allowed_user_key_lengths": { Type: framework.TypeMap, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] If set, allows the enforcement of key types and minimum key sizes to be signed. `, }, - "algorithm_signer": &framework.FieldSchema{ + "algorithm_signer": { Type: framework.TypeString, Description: ` When supplied, this value specifies a signing algorithm for the key. Possible values: diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go index 1bcd5d547b..75f6e1a292 100644 --- a/builtin/logical/ssh/path_sign.go +++ b/builtin/logical/ssh/path_sign.go @@ -46,11 +46,11 @@ func pathSign(b *backend) *framework.Path { }, Fields: map[string]*framework.FieldSchema{ - "role": &framework.FieldSchema{ + "role": { Type: framework.TypeString, Description: `The desired role with configuration for this request.`, }, - "ttl": &framework.FieldSchema{ + "ttl": { Type: framework.TypeDurationSecond, Description: `The requested Time To Live for the SSH certificate; sets the expiration date. If not specified @@ -58,28 +58,28 @@ the role default, backend default, or system default TTL is used, in that order. Cannot be later than the role max TTL.`, }, - "public_key": &framework.FieldSchema{ + "public_key": { Type: framework.TypeString, Description: `SSH public key that should be signed.`, }, - "valid_principals": &framework.FieldSchema{ + "valid_principals": { Type: framework.TypeString, Description: `Valid principals, either usernames or hostnames, that the certificate should be signed for.`, }, - "cert_type": &framework.FieldSchema{ + "cert_type": { Type: framework.TypeString, Description: `Type of certificate to be created; either "user" or "host".`, Default: "user", }, - "key_id": &framework.FieldSchema{ + "key_id": { Type: framework.TypeString, Description: `Key id that the created certificate should have. 
If not specified, the display name of the token will be used.`, }, - "critical_options": &framework.FieldSchema{ + "critical_options": { Type: framework.TypeMap, Description: `Critical options that the certificate should be signed for.`, }, - "extensions": &framework.FieldSchema{ + "extensions": { Type: framework.TypeMap, Description: `Extensions that the certificate should be signed for.`, }, diff --git a/builtin/logical/ssh/path_verify.go b/builtin/logical/ssh/path_verify.go index 6e9ba21158..7d9814751f 100644 --- a/builtin/logical/ssh/path_verify.go +++ b/builtin/logical/ssh/path_verify.go @@ -12,7 +12,7 @@ func pathVerify(b *backend) *framework.Path { return &framework.Path{ Pattern: "verify", Fields: map[string]*framework.FieldSchema{ - "otp": &framework.FieldSchema{ + "otp": { Type: framework.TypeString, Description: "[Required] One-Time-Key that needs to be validated", }, diff --git a/builtin/logical/ssh/secret_dynamic_key.go b/builtin/logical/ssh/secret_dynamic_key.go index f3eba6b468..e0ee884376 100644 --- a/builtin/logical/ssh/secret_dynamic_key.go +++ b/builtin/logical/ssh/secret_dynamic_key.go @@ -16,11 +16,11 @@ func secretDynamicKey(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretDynamicKeyType, Fields: map[string]*framework.FieldSchema{ - "username": &framework.FieldSchema{ + "username": { Type: framework.TypeString, Description: "Username in host", }, - "ip": &framework.FieldSchema{ + "ip": { Type: framework.TypeString, Description: "IP address of host", }, diff --git a/builtin/logical/ssh/secret_otp.go b/builtin/logical/ssh/secret_otp.go index 385788dbfb..72e9903f16 100644 --- a/builtin/logical/ssh/secret_otp.go +++ b/builtin/logical/ssh/secret_otp.go @@ -14,7 +14,7 @@ func secretOTP(b *backend) *framework.Secret { return &framework.Secret{ Type: SecretOTPType, Fields: map[string]*framework.FieldSchema{ - "otp": &framework.FieldSchema{ + "otp": { Type: framework.TypeString, Description: "One time password", }, diff --git a/builtin/logical/totp/backend_test.go b/builtin/logical/totp/backend_test.go index e732cf65fe..d6bad61f5d 100644 --- a/builtin/logical/totp/backend_test.go +++ b/builtin/logical/totp/backend_test.go @@ -1007,7 +1007,7 @@ func testAccStepCreateKey(t *testing.T, name string, keyData map[string]interfac Data: keyData, ErrorOk: expectFail, Check: func(resp *logical.Response) error { - //Skip this if the key is not generated by vault or if the test is expected to fail + // Skip this if the key is not generated by vault or if the test is expected to fail if !keyData["generate"].(bool) || expectFail { return nil } @@ -1037,7 +1037,7 @@ func testAccStepCreateKey(t *testing.T, name string, keyData map[string]interfac return err } - //Check to see if barcode and url are returned + // Check to see if barcode and url are returned if d.Barcode == "" { t.Fatalf("a barcode was not returned for a generated key") } @@ -1046,20 +1046,19 @@ func testAccStepCreateKey(t *testing.T, name string, keyData map[string]interfac t.Fatalf("a url was not returned for a generated key") } - //Parse url + // Parse url urlObject, err := url.Parse(d.Url) - if err != nil { t.Fatal("an error occurred while parsing url string") } - //Set up query object + // Set up query object urlQuery := urlObject.Query() - //Read secret + // Read secret urlSecret := urlQuery.Get("secret") - //Check key length + // Check key length keySize := keyData["key_size"].(int) correctSecretStringSize := (keySize / 5) * 8 actualSecretStringSize := len(urlSecret) diff --git 
a/builtin/logical/totp/path_code.go b/builtin/logical/totp/path_code.go index e5736dad80..d418b082e3 100644 --- a/builtin/logical/totp/path_code.go +++ b/builtin/logical/totp/path_code.go @@ -16,11 +16,11 @@ func pathCode(b *backend) *framework.Path { return &framework.Path{ Pattern: "code/" + framework.GenericNameWithAtRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the key.", }, - "code": &framework.FieldSchema{ + "code": { Type: framework.TypeString, Description: "TOTP code to be validated.", }, @@ -121,6 +121,7 @@ func (b *backend) pathValidateCode(ctx context.Context, req *logical.Request, da const pathCodeHelpSyn = ` Request time-based one-time use password or validate a password for a certain key . ` + const pathCodeHelpDesc = ` This path generates and validates time-based one-time use passwords for a certain key. diff --git a/builtin/logical/totp/path_keys.go b/builtin/logical/totp/path_keys.go index 97872615f4..26c8f39c4d 100644 --- a/builtin/logical/totp/path_keys.go +++ b/builtin/logical/totp/path_keys.go @@ -205,18 +205,18 @@ func (b *backend) pathKeyCreate(ctx context.Context, req *logical.Request, data // Read parameters from url if given if inputURL != "" { - //Parse url + // Parse url urlObject, err := url.Parse(inputURL) if err != nil { return logical.ErrorResponse("an error occurred while parsing url string"), err } - //Set up query object + // Set up query object urlQuery := urlObject.Query() path := strings.TrimPrefix(urlObject.Path, "/") index := strings.Index(path, ":") - //Read issuer + // Read issuer urlIssuer := urlQuery.Get("issuer") if urlIssuer != "" { issuer = urlIssuer @@ -226,17 +226,17 @@ func (b *backend) pathKeyCreate(ctx context.Context, req *logical.Request, data } } - //Read account name + // Read account name if index == -1 { accountName = path } else { accountName = path[index+1:] } - //Read key string + // Read key string keyString = urlQuery.Get("secret") - //Read period + // Read period periodQuery := urlQuery.Get("period") if periodQuery != "" { periodInt, err := strconv.Atoi(periodQuery) @@ -246,7 +246,7 @@ func (b *backend) pathKeyCreate(ctx context.Context, req *logical.Request, data period = periodInt } - //Read digits + // Read digits digitsQuery := urlQuery.Get("digits") if digitsQuery != "" { digitsInt, err := strconv.Atoi(digitsQuery) @@ -256,7 +256,7 @@ func (b *backend) pathKeyCreate(ctx context.Context, req *logical.Request, data digits = digitsInt } - //Read algorithm + // Read algorithm algorithmQuery := urlQuery.Get("algorithm") if algorithmQuery != "" { algorithm = algorithmQuery diff --git a/builtin/logical/transit/backend.go b/builtin/logical/transit/backend.go index 9d8d43b38f..707256454d 100644 --- a/builtin/logical/transit/backend.go +++ b/builtin/logical/transit/backend.go @@ -11,7 +11,6 @@ import ( ) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b, err := Backend(ctx, conf) if err != nil { return nil, err diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index 7b258a1627..bb68d9f02d 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -496,6 +496,7 @@ func testAccStepAdjustPolicyMinDecryption(t *testing.T, name string, minVer int) }, } } + func testAccStepAdjustPolicyMinEncryption(t *testing.T, name string, minVer int) logicaltest.TestStep { return logicaltest.TestStep{ Operation: 
logical.UpdateOperation, @@ -1361,7 +1362,7 @@ func testPolicyFuzzingCommon(t *testing.T, be *backend) { wg := sync.WaitGroup{} funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"} - //keys := []string{"test1", "test2", "test3", "test4", "test5"} + // keys := []string{"test1", "test2", "test3", "test4", "test5"} keys := []string{"test1", "test2", "test3"} // This is the goroutine loop @@ -1383,7 +1384,7 @@ func testPolicyFuzzingCommon(t *testing.T, be *backend) { var chosenFunc, chosenKey string - //t.Errorf("Starting %d", id) + // t.Errorf("Starting %d", id) for { // Stop after 10 seconds if time.Now().Sub(startTime) > 10*time.Second { @@ -1408,7 +1409,7 @@ func testPolicyFuzzingCommon(t *testing.T, be *backend) { switch chosenFunc { // Encrypt our plaintext and store the result case "encrypt": - //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) + // t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) fd.Raw["plaintext"] = base64.StdEncoding.EncodeToString([]byte(testPlaintext)) fd.Schema = be.pathEncrypt().Fields resp, err := be.pathEncryptWrite(context.Background(), req, fd) @@ -1419,7 +1420,7 @@ func testPolicyFuzzingCommon(t *testing.T, be *backend) { // Rotate to a new key version case "rotate": - //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) + // t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) fd.Schema = be.pathRotate().Fields resp, err := be.pathRotateWrite(context.Background(), req, fd) if err != nil { @@ -1428,7 +1429,7 @@ func testPolicyFuzzingCommon(t *testing.T, be *backend) { // Decrypt the ciphertext and compare the result case "decrypt": - //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) + // t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) ct := latestEncryptedText[chosenKey] if ct == "" { continue @@ -1460,7 +1461,7 @@ func testPolicyFuzzingCommon(t *testing.T, be *backend) { // Change the min version, which also tests the archive functionality case "change_min_version": - //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) + // t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id) resp, err := be.pathPolicyRead(context.Background(), req, fd) if err != nil { t.Errorf("got an error reading policy %s: %v", chosenKey, err) diff --git a/builtin/logical/transit/path_backup.go b/builtin/logical/transit/path_backup.go index 4c7e090786..ef13f0aab8 100644 --- a/builtin/logical/transit/path_backup.go +++ b/builtin/logical/transit/path_backup.go @@ -11,7 +11,7 @@ func (b *backend) pathBackup() *framework.Path { return &framework.Path{ Pattern: "backup/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the key", }, @@ -39,5 +39,7 @@ func (b *backend) pathBackupRead(ctx context.Context, req *logical.Request, d *f }, nil } -const pathBackupHelpSyn = `Backup the named key` -const pathBackupHelpDesc = `This path is used to backup the named key.` +const ( + pathBackupHelpSyn = `Backup the named key` + pathBackupHelpDesc = `This path is used to backup the named key.` +) diff --git a/builtin/logical/transit/path_cache_config.go b/builtin/logical/transit/path_cache_config.go index 3351249691..6239555b37 100644 --- a/builtin/logical/transit/path_cache_config.go +++ b/builtin/logical/transit/path_cache_config.go @@ -12,7 +12,7 @@ func (b *backend) pathCacheConfig() *framework.Path { return &framework.Path{ Pattern: "cache-config", Fields: map[string]*framework.FieldSchema{ - "size": &framework.FieldSchema{ + "size": { Type: 
framework.TypeInt, Required: false, Default: 0, diff --git a/builtin/logical/transit/path_config.go b/builtin/logical/transit/path_config.go index 09ef18a374..4641e2b6a7 100644 --- a/builtin/logical/transit/path_config.go +++ b/builtin/logical/transit/path_config.go @@ -13,19 +13,19 @@ func (b *backend) pathConfig() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/config", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the key", }, - "min_decryption_version": &framework.FieldSchema{ + "min_decryption_version": { Type: framework.TypeInt, Description: `If set, the minimum version of the key allowed to be decrypted. For signing keys, the minimum version allowed to be used for verification.`, }, - "min_encryption_version": &framework.FieldSchema{ + "min_encryption_version": { Type: framework.TypeInt, Description: `If set, the minimum version of the key allowed to be used for encryption; or for signing keys, @@ -33,17 +33,17 @@ to be used for signing. If set to zero, only the latest version of the key is allowed.`, }, - "deletion_allowed": &framework.FieldSchema{ + "deletion_allowed": { Type: framework.TypeBool, Description: "Whether to allow deletion of the key", }, - "exportable": &framework.FieldSchema{ + "exportable": { Type: framework.TypeBool, Description: `Enables export of the key. Once set, this cannot be disabled.`, }, - "allow_plaintext_backup": &framework.FieldSchema{ + "allow_plaintext_backup": { Type: framework.TypeBool, Description: `Enables taking a backup of the named key in plaintext format. Once set, this cannot be disabled.`, }, diff --git a/builtin/logical/transit/path_datakey.go b/builtin/logical/transit/path_datakey.go index 7a987f543c..a287bea344 100644 --- a/builtin/logical/transit/path_datakey.go +++ b/builtin/logical/transit/path_datakey.go @@ -16,35 +16,35 @@ func (b *backend) pathDatakey() *framework.Path { return &framework.Path{ Pattern: "datakey/" + framework.GenericNameRegex("plaintext") + "/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The backend key used for encrypting the data key", }, - "plaintext": &framework.FieldSchema{ + "plaintext": { Type: framework.TypeString, Description: `"plaintext" will return the key in both plaintext and ciphertext; "wrapped" will return the ciphertext only.`, }, - "context": &framework.FieldSchema{ + "context": { Type: framework.TypeString, Description: "Context for key derivation. Required for derived keys.", }, - "nonce": &framework.FieldSchema{ + "nonce": { Type: framework.TypeString, Description: "Nonce for when convergent encryption v1 is used (only in Vault 0.6.1)", }, - "bits": &framework.FieldSchema{ + "bits": { Type: framework.TypeInt, Description: `Number of bits for the key; currently 128, 256, and 512 bits are supported. Defaults to 256.`, Default: 256, }, - "key_version": &framework.FieldSchema{ + "key_version": { Type: framework.TypeInt, Description: `The version of the Vault key to use for encryption of the data key. 
Must be 0 (for latest) diff --git a/builtin/logical/transit/path_decrypt.go b/builtin/logical/transit/path_decrypt.go index 604de73fd7..4ce3eef490 100644 --- a/builtin/logical/transit/path_decrypt.go +++ b/builtin/logical/transit/path_decrypt.go @@ -25,25 +25,25 @@ func (b *backend) pathDecrypt() *framework.Path { return &framework.Path{ Pattern: "decrypt/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the policy", }, - "ciphertext": &framework.FieldSchema{ + "ciphertext": { Type: framework.TypeString, Description: ` The ciphertext to decrypt, provided as returned by encrypt.`, }, - "context": &framework.FieldSchema{ + "context": { Type: framework.TypeString, Description: ` Base64 encoded context for key derivation. Required if key derivation is enabled.`, }, - "nonce": &framework.FieldSchema{ + "nonce": { Type: framework.TypeString, Description: ` Base64 encoded nonce value used during encryption. Must be provided if diff --git a/builtin/logical/transit/path_decrypt_test.go b/builtin/logical/transit/path_decrypt_test.go index dc357ae90b..7e4d0a38ea 100644 --- a/builtin/logical/transit/path_decrypt_test.go +++ b/builtin/logical/transit/path_decrypt_test.go @@ -3,8 +3,9 @@ package transit import ( "context" "encoding/json" - "github.com/hashicorp/vault/sdk/logical" "testing" + + "github.com/hashicorp/vault/sdk/logical" ) func TestTransit_BatchDecryption(t *testing.T) { diff --git a/builtin/logical/transit/path_export.go b/builtin/logical/transit/path_export.go index 474fce94a3..33a76cf33b 100644 --- a/builtin/logical/transit/path_export.go +++ b/builtin/logical/transit/path_export.go @@ -28,15 +28,15 @@ func (b *backend) pathExportKeys() *framework.Path { return &framework.Path{ Pattern: "export/" + framework.GenericNameRegex("type") + "/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("version"), Fields: map[string]*framework.FieldSchema{ - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Description: "Type of key to export (encryption-key, signing-key, hmac-key)", }, - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the key", }, - "version": &framework.FieldSchema{ + "version": { Type: framework.TypeString, Description: "Version of the key", }, diff --git a/builtin/logical/transit/path_export_test.go b/builtin/logical/transit/path_export_test.go index d811556a60..6d44894e64 100644 --- a/builtin/logical/transit/path_export_test.go +++ b/builtin/logical/transit/path_export_test.go @@ -80,7 +80,7 @@ func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) { t.Fatal("unexpected number of keys found") } - for k, _ := range keys { + for k := range keys { if k != strconv.Itoa(expectedVersion) { t.Fatalf("expected version %q, received version %q", strconv.Itoa(expectedVersion), k) } diff --git a/builtin/logical/transit/path_hash.go b/builtin/logical/transit/path_hash.go index 11eb247893..f26435d88e 100644 --- a/builtin/logical/transit/path_hash.go +++ b/builtin/logical/transit/path_hash.go @@ -17,12 +17,12 @@ func (b *backend) pathHash() *framework.Path { return &framework.Path{ Pattern: "hash" + framework.OptionalParamRegex("urlalgorithm"), Fields: map[string]*framework.FieldSchema{ - "input": &framework.FieldSchema{ + "input": { Type: framework.TypeString, Description: "The base64-encoded input data", }, - "algorithm": &framework.FieldSchema{ + "algorithm": { 
Type: framework.TypeString, Default: "sha2-256", Description: `Algorithm to use (POST body parameter). Valid values are: @@ -35,12 +35,12 @@ func (b *backend) pathHash() *framework.Path { Defaults to "sha2-256".`, }, - "urlalgorithm": &framework.FieldSchema{ + "urlalgorithm": { Type: framework.TypeString, Description: `Algorithm to use (POST URL parameter)`, }, - "format": &framework.FieldSchema{ + "format": { Type: framework.TypeString, Default: "hex", Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "hex".`, diff --git a/builtin/logical/transit/path_hmac.go b/builtin/logical/transit/path_hmac.go index 053a349743..6fb3e66774 100644 --- a/builtin/logical/transit/path_hmac.go +++ b/builtin/logical/transit/path_hmac.go @@ -42,17 +42,17 @@ func (b *backend) pathHMAC() *framework.Path { return &framework.Path{ Pattern: "hmac/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The key to use for the HMAC function", }, - "input": &framework.FieldSchema{ + "input": { Type: framework.TypeString, Description: "The base64-encoded input data", }, - "algorithm": &framework.FieldSchema{ + "algorithm": { Type: framework.TypeString, Default: "sha2-256", Description: `Algorithm to use (POST body parameter). Valid values are: @@ -65,12 +65,12 @@ func (b *backend) pathHMAC() *framework.Path { Defaults to "sha2-256".`, }, - "urlalgorithm": &framework.FieldSchema{ + "urlalgorithm": { Type: framework.TypeString, Description: `Algorithm to use (POST URL parameter)`, }, - "key_version": &framework.FieldSchema{ + "key_version": { Type: framework.TypeInt, Description: `The version of the key to use for generating the HMAC. Must be 0 (for latest) or a value greater than or equal @@ -184,7 +184,7 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr continue } - var hf = hmac.New(hashAlg, key) + hf := hmac.New(hashAlg, key) hf.Write(input) retBytes := hf.Sum(nil) @@ -218,7 +218,6 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr } func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - name := d.Get("name").(string) algorithm := d.Get("urlalgorithm").(string) if algorithm == "" { @@ -349,7 +348,7 @@ func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *f continue } - var hf = hmac.New(hashAlg, key) + hf := hmac.New(hashAlg, key) hf.Write(input) retBytes := hf.Sum(nil) response[i].Valid = hmac.Equal(retBytes, verBytes) diff --git a/builtin/logical/transit/path_keys.go b/builtin/logical/transit/path_keys.go index 18300c7dc2..4049d31399 100644 --- a/builtin/logical/transit/path_keys.go +++ b/builtin/logical/transit/path_keys.go @@ -36,12 +36,12 @@ func (b *backend) pathKeys() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the key", }, - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Default: "aes256-gcm96", Description: ` @@ -51,14 +51,14 @@ The type of key to create. Currently, "aes128-gcm96" (symmetric), "aes256-gcm96" `, }, - "derived": &framework.FieldSchema{ + "derived": { Type: framework.TypeBool, Description: `Enables key derivation mode. 
This allows for per-transaction unique keys for encryption operations.`, }, - "convergent_encryption": &framework.FieldSchema{ + "convergent_encryption": { Type: framework.TypeBool, Description: `Whether to support convergent encryption. This is only supported when using a key with @@ -74,21 +74,21 @@ given context. Failing to do so will severely impact the ciphertext's security.`, }, - "exportable": &framework.FieldSchema{ + "exportable": { Type: framework.TypeBool, Description: `Enables keys to be exportable. This allows for all the valid keys in the key ring to be exported.`, }, - "allow_plaintext_backup": &framework.FieldSchema{ + "allow_plaintext_backup": { Type: framework.TypeBool, Description: `Enables taking a backup of the named key in plaintext format. Once set, this cannot be disabled.`, }, - "context": &framework.FieldSchema{ + "context": { Type: framework.TypeString, Description: `Base64 encoded context for key derivation. When reading a key with key derivation enabled, diff --git a/builtin/logical/transit/path_random.go b/builtin/logical/transit/path_random.go index 7f4c9fc39c..93810bec77 100644 --- a/builtin/logical/transit/path_random.go +++ b/builtin/logical/transit/path_random.go @@ -18,18 +18,18 @@ func (b *backend) pathRandom() *framework.Path { return &framework.Path{ Pattern: "random" + framework.OptionalParamRegex("urlbytes"), Fields: map[string]*framework.FieldSchema{ - "urlbytes": &framework.FieldSchema{ + "urlbytes": { Type: framework.TypeString, Description: "The number of bytes to generate (POST URL parameter)", }, - "bytes": &framework.FieldSchema{ + "bytes": { Type: framework.TypeInt, Default: 32, Description: "The number of bytes to generate (POST body parameter). Defaults to 32 (256 bits).", }, - "format": &framework.FieldSchema{ + "format": { Type: framework.TypeString, Default: "base64", Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`, diff --git a/builtin/logical/transit/path_random_test.go b/builtin/logical/transit/path_random_test.go index 62e2b90a57..9943453942 100644 --- a/builtin/logical/transit/path_random_test.go +++ b/builtin/logical/transit/path_random_test.go @@ -101,5 +101,4 @@ func TestTransit_Random(t *testing.T) { req.Data["format"] = "hex" req.Data["bytes"] = maxBytes + 1 doRequest(req, true, "", 0) - } diff --git a/builtin/logical/transit/path_restore.go b/builtin/logical/transit/path_restore.go index 26ccb9c1d4..fa8c142bba 100644 --- a/builtin/logical/transit/path_restore.go +++ b/builtin/logical/transit/path_restore.go @@ -13,15 +13,15 @@ func (b *backend) pathRestore() *framework.Path { return &framework.Path{ Pattern: "restore" + framework.OptionalParamRegex("name"), Fields: map[string]*framework.FieldSchema{ - "backup": &framework.FieldSchema{ + "backup": { Type: framework.TypeString, Description: "Backed up key data to be restored. 
This should be the output from the 'backup/' endpoint.", }, - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "If set, this will be the name of the restored key.", }, - "force": &framework.FieldSchema{ + "force": { Type: framework.TypeBool, Description: "If set and a key by the given name exists, force the restore operation and override the key.", Default: false, @@ -54,7 +54,9 @@ func (b *backend) pathRestoreUpdate(ctx context.Context, req *logical.Request, d return nil, b.lm.RestorePolicy(ctx, req.Storage, keyName, backupB64, force) } -const pathRestoreHelpSyn = `Restore the named key` -const pathRestoreHelpDesc = `This path is used to restore the named key.` +const ( + pathRestoreHelpSyn = `Restore the named key` + pathRestoreHelpDesc = `This path is used to restore the named key.` +) var ErrInvalidKeyName = errors.New("key names cannot be paths") diff --git a/builtin/logical/transit/path_rewrap.go b/builtin/logical/transit/path_rewrap.go index 010d8a0bea..29bd2db4e8 100644 --- a/builtin/logical/transit/path_rewrap.go +++ b/builtin/logical/transit/path_rewrap.go @@ -17,27 +17,27 @@ func (b *backend) pathRewrap() *framework.Path { return &framework.Path{ Pattern: "rewrap/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the key", }, - "ciphertext": &framework.FieldSchema{ + "ciphertext": { Type: framework.TypeString, Description: "Ciphertext value to rewrap", }, - "context": &framework.FieldSchema{ + "context": { Type: framework.TypeString, Description: "Base64 encoded context for key derivation. Required for derived keys.", }, - "nonce": &framework.FieldSchema{ + "nonce": { Type: framework.TypeString, Description: "Nonce for when convergent encryption is used", }, - "key_version": &framework.FieldSchema{ + "key_version": { Type: framework.TypeInt, Description: `The version of the key to use for encryption. 
Must be 0 (for latest) or a value greater than or equal diff --git a/builtin/logical/transit/path_rotate.go b/builtin/logical/transit/path_rotate.go index 6b2937d1c1..3d2c2cdf40 100644 --- a/builtin/logical/transit/path_rotate.go +++ b/builtin/logical/transit/path_rotate.go @@ -12,7 +12,7 @@ func (b *backend) pathRotate() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/rotate", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the key", }, diff --git a/builtin/logical/transit/path_sign_verify.go b/builtin/logical/transit/path_sign_verify.go index efce58b379..fda81d2aa9 100644 --- a/builtin/logical/transit/path_sign_verify.go +++ b/builtin/logical/transit/path_sign_verify.go @@ -143,7 +143,7 @@ func (b *backend) pathVerify() *framework.Path { return &framework.Path{ Pattern: "verify/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The key to use", }, @@ -307,7 +307,7 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } if p.Type.HashSignatureInput() && !prehashed { - var hf = keysutil.HashFuncMap[hashAlgorithm]() + hf := keysutil.HashFuncMap[hashAlgorithm]() hf.Write(input) input = hf.Sum(nil) } diff --git a/builtin/logical/transit/path_trim.go b/builtin/logical/transit/path_trim.go index 239720ed81..cec7a5648e 100644 --- a/builtin/logical/transit/path_trim.go +++ b/builtin/logical/transit/path_trim.go @@ -12,11 +12,11 @@ func (b *backend) pathTrim() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/trim", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "Name of the key", }, - "min_available_version": &framework.FieldSchema{ + "min_available_version": { Type: framework.TypeInt, Description: ` The minimum available version for the key ring. All versions before this diff --git a/builtin/plugin/backend.go b/builtin/plugin/backend.go index ea433e9927..d33fe9c1a8 100644 --- a/builtin/plugin/backend.go +++ b/builtin/plugin/backend.go @@ -194,7 +194,6 @@ func (b *PluginBackend) lazyLoadBackend(ctx context.Context, storage logical.Sto // HandleRequest is a thin wrapper implementation of HandleRequest that includes // automatic plugin reload. func (b *PluginBackend) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) { - err = b.lazyLoadBackend(ctx, req.Storage, func() error { var merr error resp, merr = b.Backend.HandleRequest(ctx, req) @@ -207,7 +206,6 @@ func (b *PluginBackend) HandleRequest(ctx context.Context, req *logical.Request) // HandleExistenceCheck is a thin wrapper implementation of HandleExistenceCheck // that includes automatic plugin reload. 
func (b *PluginBackend) HandleExistenceCheck(ctx context.Context, req *logical.Request) (checkFound bool, exists bool, err error) { - err = b.lazyLoadBackend(ctx, req.Storage, func() error { var merr error checkFound, exists, merr = b.Backend.HandleExistenceCheck(ctx, req) diff --git a/builtin/plugin/backend_lazyLoad_test.go b/builtin/plugin/backend_lazyLoad_test.go index b9a97dd994..53c6f96118 100644 --- a/builtin/plugin/backend_lazyLoad_test.go +++ b/builtin/plugin/backend_lazyLoad_test.go @@ -15,7 +15,6 @@ import ( ) func TestBackend_lazyLoad(t *testing.T) { - // normal load var invocations int b := testLazyLoad(t, func() error { @@ -47,7 +46,6 @@ func TestBackend_lazyLoad(t *testing.T) { } func testLazyLoad(t *testing.T, methodWrapper func() error) *PluginBackend { - sysView := newTestSystemView() ctx := context.Background() @@ -148,12 +146,15 @@ func (b *testBackend) Logger() hclog.Logger { func (b *testBackend) HandleRequest(context.Context, *logical.Request) (*logical.Response, error) { panic("not needed") } + func (b *testBackend) System() logical.SystemView { panic("not needed") } + func (b *testBackend) HandleExistenceCheck(context.Context, *logical.Request) (bool, bool, error) { panic("not needed") } + func (b *testBackend) InvalidateKey(context.Context, string) { panic("not needed") } @@ -174,7 +175,6 @@ func newTestSystemView() testSystemView { } func (v testSystemView) LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error) { - return &pluginutil.PluginRunner{ Name: "test-plugin-runner", Builtin: true, diff --git a/command/agent.go b/command/agent.go index 9e2eb24248..576d9a40ed 100644 --- a/command/agent.go +++ b/command/agent.go @@ -52,8 +52,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AgentCommand)(nil) -var _ cli.CommandAutocomplete = (*AgentCommand)(nil) +var ( + _ cli.Command = (*AgentCommand)(nil) + _ cli.CommandAutocomplete = (*AgentCommand)(nil) +) type AgentCommand struct { *BaseCommand @@ -659,7 +661,7 @@ func (c *AgentCommand) Run(args []string) int { }) } - var proxyVaultToken = !config.Cache.ForceAutoAuthToken + proxyVaultToken := !config.Cache.ForceAutoAuthToken // Create the request handler cacheHandler := cache.Handler(ctx, cacheLogger, leaseCache, inmemSink, proxyVaultToken) @@ -876,7 +878,6 @@ func (c *AgentCommand) Run(args []string) int { // the request header that is used for SSRF protection. 
func verifyRequestHeader(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if val, ok := r.Header[consts.RequestHeaderName]; !ok || len(val) != 1 || val[0] != "true" { logical.RespondError(w, http.StatusPreconditionFailed, @@ -944,7 +945,7 @@ func (c *AgentCommand) storePidFile(pidPath string) error { } // Open the PID file - pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) if err != nil { return errwrap.Wrapf("could not open pid file: {{err}}", err) } diff --git a/command/agent/approle_end_to_end_test.go b/command/agent/approle_end_to_end_test.go index 74cd021864..35186cd8e6 100644 --- a/command/agent/approle_end_to_end_test.go +++ b/command/agent/approle_end_to_end_test.go @@ -31,7 +31,7 @@ func TestAppRoleEndToEnd(t *testing.T) { secretIDLess bool expectToken bool }{ - //default behaviour => token expected + // default behaviour => token expected {false, true, false, true}, {true, true, false, true}, @@ -40,11 +40,11 @@ func TestAppRoleEndToEnd(t *testing.T) { {false, false, false, true}, {true, false, false, true}, - //bindSecretID=false, secret not provided => token expected + // bindSecretID=false, secret not provided => token expected {false, false, true, true}, {true, false, true, true}, - //bindSecretID=true, secret not provided => token not expected + // bindSecretID=true, secret not provided => token not expected {false, true, true, false}, {true, true, true, false}, } @@ -192,8 +192,8 @@ func testAppRoleEndToEnd(t *testing.T, removeSecretIDFile bool, bindSecretID boo secretFromAgent = secretf.Name() secretf.Close() defer os.Remove(secretFromAgent) - //if the token is empty, auth.approle would fail reporting the error - if err := ioutil.WriteFile(secretFromAgent, []byte("wrong-secret"), 0600); err != nil { + // if the token is empty, auth.approle would fail reporting the error + if err := ioutil.WriteFile(secretFromAgent, []byte("wrong-secret"), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote secret_id_file_path with wrong-secret", "path", secretFromAgent) @@ -280,14 +280,14 @@ func testAppRoleEndToEnd(t *testing.T, removeSecretIDFile bool, bindSecretID boo t.Fatal("expected notexist err") } - if err := ioutil.WriteFile(role, []byte(roleID1), 0600); err != nil { + if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test role 1", "path", role) } if bindSecretID { - if err := ioutil.WriteFile(secret, []byte(secretID1), 0600); err != nil { + if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test secret 1", "path", secret) @@ -359,14 +359,14 @@ func testAppRoleEndToEnd(t *testing.T, removeSecretIDFile bool, bindSecretID boo } // Write new values - if err := ioutil.WriteFile(role, []byte(roleID2), 0600); err != nil { + if err := ioutil.WriteFile(role, []byte(roleID2), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test role 2", "path", role) } if bindSecretID { - if err := ioutil.WriteFile(secret, []byte(secretID2), 0600); err != nil { + if err := ioutil.WriteFile(secret, []byte(secretID2), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test secret 2", "path", secret) @@ -405,14 +405,14 @@ func TestAppRoleWithWrapping(t *testing.T) { secretIDLess bool expectToken bool }{ - //default behaviour => token expected + // default behaviour => token 
expected {true, false, true}, //bindSecretID=false, wrong secret provided, wrapping_path provided => token not expected //(wrapping token is not valid or does not exist) {false, false, false}, - //bindSecretID=false, no secret provided, wrapping_path provided but ignored => token expected + // bindSecretID=false, no secret provided, wrapping_path provided but ignored => token expected {false, true, true}, } for _, tc := range testCases { @@ -536,8 +536,8 @@ func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, secretFromAgent = secretf.Name() secretf.Close() defer os.Remove(secretFromAgent) - //if the token is empty, auth.approle would fail reporting the error - if err := ioutil.WriteFile(secretFromAgent, []byte("wrong-secret"), 0600); err != nil { + // if the token is empty, auth.approle would fail reporting the error + if err := ioutil.WriteFile(secretFromAgent, []byte("wrong-secret"), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote secret_id_file_path with wrong-secret", "path", secretFromAgent) @@ -624,7 +624,7 @@ func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, t.Fatal("expected notexist err") } - if err := ioutil.WriteFile(role, []byte(roleID1), 0600); err != nil { + if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test role 1", "path", role) @@ -633,7 +633,7 @@ func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, if bindSecretID { logger.Trace("WRITING TO auth.secret-id.test.", "secret", secret, "secretID1", secretID1) - if err := ioutil.WriteFile(secret, []byte(secretID1), 0600); err != nil { + if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test secret 1", "path", secret) @@ -713,7 +713,7 @@ func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, t.Fatal(err) } secretID2 := resp.WrapInfo.Token - if err := ioutil.WriteFile(secret, []byte(secretID2), 0600); err != nil { + if err := ioutil.WriteFile(secret, []byte(secretID2), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test secret 2", "path", secret) @@ -748,7 +748,7 @@ func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, func addConstraints(add bool, cfg map[string]interface{}) map[string]interface{} { if add { - //extraConstraints to add when bind_secret_id=false (otherwise Vault would fail with: "at least one constraint should be enabled on the role") + // extraConstraints to add when bind_secret_id=false (otherwise Vault would fail with: "at least one constraint should be enabled on the role") extraConstraints := map[string]interface{}{ "secret_id_bound_cidrs": "127.0.0.1/32", "token_bound_cidrs": "127.0.0.1/32", diff --git a/command/agent/auth/azure/azure.go b/command/agent/auth/azure/azure.go index 26a4e4af5e..4b2f7274a3 100644 --- a/command/agent/auth/azure/azure.go +++ b/command/agent/auth/azure/azure.go @@ -157,7 +157,6 @@ func getMetadataInfo(ctx context.Context, endpoint, resource string) ([]byte, er client := cleanhttp.DefaultClient() resp, err := client.Do(req) - if err != nil { return nil, errwrap.Wrapf(fmt.Sprintf("error fetching metadata from %s: {{err}}", endpoint), err) } diff --git a/command/agent/auth/kubernetes/kubernetes_test.go b/command/agent/auth/kubernetes/kubernetes_test.go index 8032d11e09..34f965c770 100644 --- a/command/agent/auth/kubernetes/kubernetes_test.go +++ 
b/command/agent/auth/kubernetes/kubernetes_test.go @@ -91,7 +91,6 @@ func TestKubernetesAuth_basic(t *testing.T) { } }) } - } // jwt for default service account diff --git a/command/agent/auto_auth_preload_token_end_to_end_test.go b/command/agent/auto_auth_preload_token_end_to_end_test.go index 9e059049c1..3f8d972a32 100644 --- a/command/agent/auto_auth_preload_token_end_to_end_test.go +++ b/command/agent/auto_auth_preload_token_end_to_end_test.go @@ -95,13 +95,13 @@ func TestTokenPreload_UsingAutoAuth(t *testing.T) { "secret_id_file_path": secret, } - if err := ioutil.WriteFile(role, []byte(roleID1), 0600); err != nil { + if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test role 1", "path", role) } - if err := ioutil.WriteFile(secret, []byte(secretID1), 0600); err != nil { + if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test secret 1", "path", secret) diff --git a/command/agent/aws_end_to_end_test.go b/command/agent/aws_end_to_end_test.go index fdea3db1fc..ca7b419648 100644 --- a/command/agent/aws_end_to_end_test.go +++ b/command/agent/aws_end_to_end_test.go @@ -191,7 +191,6 @@ func TestAWSEndToEnd(t *testing.T) { } func setAwsEnvCreds() error { - cfg := &aws.Config{ Credentials: credentials.NewStaticCredentials(os.Getenv(envVarAwsTestAccessKey), os.Getenv(envVarAwsTestSecretKey), ""), } diff --git a/command/agent/cache/api_proxy_test.go b/command/agent/cache/api_proxy_test.go index 26efc0d9e6..7ce833d82c 100644 --- a/command/agent/cache/api_proxy_test.go +++ b/command/agent/cache/api_proxy_test.go @@ -2,10 +2,11 @@ package cache import ( "encoding/base64" - "github.com/go-test/deep" "net/http" "testing" + "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/namespace" @@ -104,7 +105,7 @@ func TestMergeStates(t *testing.T) { expected []string } - var testCases = []testCase{ + testCases := []testCase{ { name: "empty-old", old: nil, diff --git a/command/agent/cache/cache_test.go b/command/agent/cache/cache_test.go index 49b3d07f34..bee5fc0e87 100644 --- a/command/agent/cache/cache_test.go +++ b/command/agent/cache/cache_test.go @@ -306,7 +306,7 @@ func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) { // Create a muxer and add paths relevant for the lease cache layer mux := http.NewServeMux() - //mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) + // mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) mux.Handle("/", Handler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false)) server := &http.Server{ diff --git a/command/agent/cache/cacheboltdb/bolt.go b/command/agent/cache/cacheboltdb/bolt.go index afff5c6472..69a438c180 100644 --- a/command/agent/cache/cacheboltdb/bolt.go +++ b/command/agent/cache/cacheboltdb/bolt.go @@ -66,7 +66,7 @@ type BoltStorageConfig struct { // exist. 
func NewBoltStorage(config *BoltStorageConfig) (*BoltStorage, error) { dbPath := filepath.Join(config.Path, DatabaseFileName) - db, err := bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) + db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { return nil, err } @@ -290,7 +290,7 @@ func (b *BoltStorage) Clear() error { // DBFileExists checks whether the vault agent cache file at `filePath` exists func DBFileExists(path string) (bool, error) { - checkFile, err := os.OpenFile(filepath.Join(path, DatabaseFileName), os.O_RDWR, 0600) + checkFile, err := os.OpenFile(filepath.Join(path, DatabaseFileName), os.O_RDWR, 0o600) defer checkFile.Close() switch { case err == nil: diff --git a/command/agent/cache/cacheboltdb/bolt_test.go b/command/agent/cache/cacheboltdb/bolt_test.go index 9e54e166a5..8dfafc4ee6 100644 --- a/command/agent/cache/cacheboltdb/bolt_test.go +++ b/command/agent/cache/cacheboltdb/bolt_test.go @@ -207,7 +207,7 @@ func TestDBFileExists(t *testing.T) { require.NoError(t, err) } if tc.createFile { - err = ioutil.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0600) + err = ioutil.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0o600) require.NoError(t, err) } exists, err := DBFileExists(tmpPath) @@ -215,7 +215,6 @@ func TestDBFileExists(t *testing.T) { assert.Equal(t, tc.expectExist, exists) }) } - } func Test_SetGetRetrievalToken(t *testing.T) { diff --git a/command/agent/cache/cachememdb/cache_memdb.go b/command/agent/cache/cachememdb/cache_memdb.go index a28ad9a0c2..7fdad303bb 100644 --- a/command/agent/cache/cachememdb/cache_memdb.go +++ b/command/agent/cache/cachememdb/cache_memdb.go @@ -35,12 +35,12 @@ func New() (*CacheMemDB, error) { func newDB() (*memdb.MemDB, error) { cacheSchema := &memdb.DBSchema{ Tables: map[string]*memdb.TableSchema{ - tableNameIndexer: &memdb.TableSchema{ + tableNameIndexer: { Name: tableNameIndexer, Indexes: map[string]*memdb.IndexSchema{ // This index enables fetching the cached item based on the // identifier of the index. - IndexNameID: &memdb.IndexSchema{ + IndexNameID: { Name: IndexNameID, Unique: true, Indexer: &memdb.StringFieldIndex{ @@ -49,7 +49,7 @@ func newDB() (*memdb.MemDB, error) { }, // This index enables fetching all the entries in cache for // a given request path, in a given namespace. - IndexNameRequestPath: &memdb.IndexSchema{ + IndexNameRequestPath: { Name: IndexNameRequestPath, Unique: false, Indexer: &memdb.CompoundIndex{ @@ -65,7 +65,7 @@ func newDB() (*memdb.MemDB, error) { }, // This index enables fetching all the entries in cache // belonging to the leases of a given token. - IndexNameLeaseToken: &memdb.IndexSchema{ + IndexNameLeaseToken: { Name: IndexNameLeaseToken, Unique: false, AllowMissing: true, @@ -77,7 +77,7 @@ func newDB() (*memdb.MemDB, error) { // that are tied to the given token, regardless of the // entries belonging to the token or belonging to the // lease. - IndexNameToken: &memdb.IndexSchema{ + IndexNameToken: { Name: IndexNameToken, Unique: true, AllowMissing: true, @@ -87,7 +87,7 @@ func newDB() (*memdb.MemDB, error) { }, // This index enables fetching all the entries in cache for // the given parent token. - IndexNameTokenParent: &memdb.IndexSchema{ + IndexNameTokenParent: { Name: IndexNameTokenParent, Unique: false, AllowMissing: true, @@ -97,7 +97,7 @@ func newDB() (*memdb.MemDB, error) { }, // This index enables fetching all the entries in cache for // the given accessor. 
- IndexNameTokenAccessor: &memdb.IndexSchema{ + IndexNameTokenAccessor: { Name: IndexNameTokenAccessor, Unique: true, AllowMissing: true, @@ -107,7 +107,7 @@ func newDB() (*memdb.MemDB, error) { }, // This index enables fetching all the entries in cache for // the given lease identifier. - IndexNameLease: &memdb.IndexSchema{ + IndexNameLease: { Name: IndexNameLease, Unique: true, AllowMissing: true, diff --git a/command/agent/cache/cachememdb/index_test.go b/command/agent/cache/cachememdb/index_test.go index f603399e05..577e37d647 100644 --- a/command/agent/cache/cachememdb/index_test.go +++ b/command/agent/cache/cachememdb/index_test.go @@ -11,7 +11,6 @@ import ( ) func TestSerializeDeserialize(t *testing.T) { - testIndex := &Index{ ID: "testid", Token: "testtoken", diff --git a/command/agent/cache/keymanager/passthrough.go b/command/agent/cache/keymanager/passthrough.go index a4aff2eba2..447dd41504 100644 --- a/command/agent/cache/keymanager/passthrough.go +++ b/command/agent/cache/keymanager/passthrough.go @@ -18,7 +18,6 @@ type PassthroughKeyManager struct { // If a key is provided, it will be used as the encryption key for the wrapper, // otherwise one will be generated. func NewPassthroughKeyManager(key []byte) (*PassthroughKeyManager, error) { - var rootKey []byte = nil switch len(key) { case 0: diff --git a/command/agent/cache/lease_cache_test.go b/command/agent/cache/lease_cache_test.go index deea8f1972..32842c059d 100644 --- a/command/agent/cache/lease_cache_test.go +++ b/command/agent/cache/lease_cache_test.go @@ -631,7 +631,6 @@ func TestLeaseCache_Concurrent_NonCacheable(t *testing.T) { t.Fatalf("request timed out: %s", ctx.Err()) case <-wgDoneCh: } - } func TestLeaseCache_Concurrent_Cacheable(t *testing.T) { @@ -927,7 +926,6 @@ func TestRegisterAutoAuth_sameToken(t *testing.T) { } func Test_hasExpired(t *testing.T) { - responses := []*SendResponse{ newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 60}}`), newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 60}`), @@ -985,7 +983,6 @@ func Test_hasExpired(t *testing.T) { require.NoError(t, err) assert.True(t, expired) } - } func TestLeaseCache_hasExpired_wrong_type(t *testing.T) { diff --git a/command/agent/cache/listener.go b/command/agent/cache/listener.go index 4febcd83ce..c11867ac13 100644 --- a/command/agent/cache/listener.go +++ b/command/agent/cache/listener.go @@ -4,7 +4,6 @@ import ( "crypto/tls" "fmt" "net" - "strings" "github.com/hashicorp/vault/command/server" diff --git a/command/agent/cache_end_to_end_test.go b/command/agent/cache_end_to_end_test.go index 7f943a1d2f..4ad056a850 100644 --- a/command/agent/cache_end_to_end_test.go +++ b/command/agent/cache_end_to_end_test.go @@ -265,13 +265,13 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { t.Fatal("expected notexist err") } - if err := ioutil.WriteFile(role, []byte(roleID1), 0600); err != nil { + if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test role 1", "path", role) } - if err := ioutil.WriteFile(secret, []byte(secretID1), 0600); err != nil { + if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test secret 1", "path", secret) diff --git a/command/agent/cert_end_to_end_test.go b/command/agent/cert_end_to_end_test.go index bd4543d7c9..bacb188021 100644 --- a/command/agent/cert_end_to_end_test.go +++ 
b/command/agent/cert_end_to_end_test.go @@ -28,7 +28,6 @@ import ( ) func TestCertEndToEnd(t *testing.T) { - cases := []struct { name string withCertRoleName bool @@ -129,7 +128,7 @@ func testCertEndToEnd(t *testing.T, withCertRoleName, ahWrapping bool) { if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(dhpath, mPubKey, 0600); err != nil { + if err := ioutil.WriteFile(dhpath, mPubKey, 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote dh param file", "path", dhpath) diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go index 32924d592c..38a1d19fb7 100644 --- a/command/agent/config/config_test.go +++ b/command/agent/config/config_test.go @@ -550,7 +550,7 @@ func TestLoadConfigFile_Template(t *testing.T) { "min": { fixturePath: "./test-fixtures/config-template-min.hcl", expectedTemplates: []*ctconfig.TemplateConfig{ - &ctconfig.TemplateConfig{ + { Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), }, @@ -559,7 +559,7 @@ func TestLoadConfigFile_Template(t *testing.T) { "full": { fixturePath: "./test-fixtures/config-template-full.hcl", expectedTemplates: []*ctconfig.TemplateConfig{ - &ctconfig.TemplateConfig{ + { Backup: pointerutil.BoolPtr(true), Command: pointerutil.StringPtr("restart service foo"), CommandTimeout: pointerutil.TimeDurationPtr("60s"), @@ -568,7 +568,7 @@ func TestLoadConfigFile_Template(t *testing.T) { Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), ErrMissingKey: pointerutil.BoolPtr(true), LeftDelim: pointerutil.StringPtr("<<"), - Perms: pointerutil.FileModePtr(0655), + Perms: pointerutil.FileModePtr(0o655), RightDelim: pointerutil.StringPtr(">>"), SandboxPath: pointerutil.StringPtr("/path/on/disk/where"), @@ -582,19 +582,19 @@ func TestLoadConfigFile_Template(t *testing.T) { "many": { fixturePath: "./test-fixtures/config-template-many.hcl", expectedTemplates: []*ctconfig.TemplateConfig{ - &ctconfig.TemplateConfig{ + { Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), ErrMissingKey: pointerutil.BoolPtr(false), CreateDestDirs: pointerutil.BoolPtr(true), Command: pointerutil.StringPtr("restart service foo"), - Perms: pointerutil.FileModePtr(0600), + Perms: pointerutil.FileModePtr(0o600), }, - &ctconfig.TemplateConfig{ + { Source: pointerutil.StringPtr("/path/on/disk/to/template2.ctmpl"), Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render2.txt"), Backup: pointerutil.BoolPtr(true), - Perms: pointerutil.FileModePtr(0755), + Perms: pointerutil.FileModePtr(0o755), Wait: &ctconfig.WaitConfig{ Min: pointerutil.TimeDurationPtr("2s"), Max: pointerutil.TimeDurationPtr("10s"), @@ -660,7 +660,7 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { "min": { fixturePath: "./test-fixtures/config-template-min-nosink.hcl", expectedTemplates: []*ctconfig.TemplateConfig{ - &ctconfig.TemplateConfig{ + { Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), }, @@ -669,7 +669,7 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { "full": { fixturePath: "./test-fixtures/config-template-full-nosink.hcl", expectedTemplates: []*ctconfig.TemplateConfig{ - &ctconfig.TemplateConfig{ + { Backup: pointerutil.BoolPtr(true), Command: pointerutil.StringPtr("restart service foo"), 
CommandTimeout: pointerutil.TimeDurationPtr("60s"), @@ -678,7 +678,7 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), ErrMissingKey: pointerutil.BoolPtr(true), LeftDelim: pointerutil.StringPtr("<<"), - Perms: pointerutil.FileModePtr(0655), + Perms: pointerutil.FileModePtr(0o655), RightDelim: pointerutil.StringPtr(">>"), SandboxPath: pointerutil.StringPtr("/path/on/disk/where"), @@ -692,19 +692,19 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { "many": { fixturePath: "./test-fixtures/config-template-many-nosink.hcl", expectedTemplates: []*ctconfig.TemplateConfig{ - &ctconfig.TemplateConfig{ + { Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), ErrMissingKey: pointerutil.BoolPtr(false), CreateDestDirs: pointerutil.BoolPtr(true), Command: pointerutil.StringPtr("restart service foo"), - Perms: pointerutil.FileModePtr(0600), + Perms: pointerutil.FileModePtr(0o600), }, - &ctconfig.TemplateConfig{ + { Source: pointerutil.StringPtr("/path/on/disk/to/template2.ctmpl"), Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render2.txt"), Backup: pointerutil.BoolPtr(true), - Perms: pointerutil.FileModePtr(0755), + Perms: pointerutil.FileModePtr(0o755), Wait: &ctconfig.WaitConfig{ Min: pointerutil.TimeDurationPtr("2s"), Max: pointerutil.TimeDurationPtr("10s"), diff --git a/command/agent/jwt_end_to_end_test.go b/command/agent/jwt_end_to_end_test.go index 0c45b94b82..c2d74d9f37 100644 --- a/command/agent/jwt_end_to_end_test.go +++ b/command/agent/jwt_end_to_end_test.go @@ -116,7 +116,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(dhpath, mPubKey, 0600); err != nil { + if err := ioutil.WriteFile(dhpath, mPubKey, 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote dh param file", "path", dhpath) @@ -225,7 +225,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { // Get a token jwtToken, _ := GetTestJWT(t) - if err := ioutil.WriteFile(in, []byte(jwtToken), 0600); err != nil { + if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test jwt", "path", in) @@ -336,7 +336,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { // Get another token to test the backend pushing the need to authenticate // to the handler jwtToken, _ = GetTestJWT(t) - if err := ioutil.WriteFile(in, []byte(jwtToken), 0600); err != nil { + if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } diff --git a/command/agent/sink/file/file_sink.go b/command/agent/sink/file/file_sink.go index e76e3e820a..0437aae981 100644 --- a/command/agent/sink/file/file_sink.go +++ b/command/agent/sink/file/file_sink.go @@ -30,7 +30,7 @@ func NewFileSink(conf *sink.SinkConfig) (sink.Sink, error) { f := &fileSink{ logger: conf.Logger, - mode: 0640, + mode: 0o640, } pathRaw, ok := conf.Config["path"] diff --git a/command/agent/sink/file/file_sink_test.go b/command/agent/sink/file/file_sink_test.go index f9c66608a2..9749522b49 100644 --- a/command/agent/sink/file/file_sink_test.go +++ b/command/agent/sink/file/file_sink_test.go @@ -63,7 +63,7 @@ func TestFileSink(t *testing.T) { if err != nil { t.Fatal(err) } - if fi.Mode() != os.FileMode(0640) { + if fi.Mode() != os.FileMode(0o640) { t.Fatalf("wrong file mode was detected at %s", path) } err = file.Close() @@ 
-93,7 +93,7 @@ func testFileSinkMode(t *testing.T, log hclog.Logger) (*sink.SinkConfig, string) Logger: log.Named("sink.file"), Config: map[string]interface{}{ "path": path, - "mode": 0644, + "mode": 0o644, }, } @@ -129,7 +129,7 @@ func TestFileSinkMode(t *testing.T) { if err != nil { t.Fatal(err) } - if fi.Mode() != os.FileMode(0644) { + if fi.Mode() != os.FileMode(0o644) { t.Fatalf("wrong file mode was detected at %s", path) } diff --git a/command/agent/sink/file/sink_test.go b/command/agent/sink/file/sink_test.go index fc18889465..839340f0c8 100644 --- a/command/agent/sink/file/sink_test.go +++ b/command/agent/sink/file/sink_test.go @@ -99,7 +99,7 @@ func TestSinkServerRetry(t *testing.T) { }) in := make(chan string) - sinks := []*sink.SinkConfig{&sink.SinkConfig{Sink: b1}, &sink.SinkConfig{Sink: b2}} + sinks := []*sink.SinkConfig{{Sink: b1}, {Sink: b2}} errCh := make(chan error) go func() { errCh <- ss.Run(ctx, in, sinks) diff --git a/command/agent/template/template_test.go b/command/agent/template/template_test.go index 52f7692a37..5efa76900e 100644 --- a/command/agent/template/template_test.go +++ b/command/agent/template/template_test.go @@ -357,7 +357,7 @@ func TestServerRun(t *testing.T) { }{ "simple": { templateMap: map[string]*templateTest{ - "render_01": &templateTest{ + "render_01": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContents), }, @@ -367,37 +367,37 @@ func TestServerRun(t *testing.T) { }, "multiple": { templateMap: map[string]*templateTest{ - "render_01": &templateTest{ + "render_01": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContents), }, }, - "render_02": &templateTest{ + "render_02": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContents), }, }, - "render_03": &templateTest{ + "render_03": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContents), }, }, - "render_04": &templateTest{ + "render_04": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContents), }, }, - "render_05": &templateTest{ + "render_05": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContents), }, }, - "render_06": &templateTest{ + "render_06": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContents), }, }, - "render_07": &templateTest{ + "render_07": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContents), }, @@ -407,7 +407,7 @@ func TestServerRun(t *testing.T) { }, "bad secret": { templateMap: map[string]*templateTest{ - "render_01": &templateTest{ + "render_01": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContentsBad), }, @@ -417,7 +417,7 @@ func TestServerRun(t *testing.T) { }, "missing key": { templateMap: map[string]*templateTest{ - "render_01": &templateTest{ + "render_01": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContentsMissingKey), ErrMissingKey: pointerutil.BoolPtr(true), @@ -428,7 +428,7 @@ func TestServerRun(t *testing.T) { }, "permission denied": { templateMap: map[string]*templateTest{ - "render_01": &templateTest{ + "render_01": { template: &ctconfig.TemplateConfig{ Contents: pointerutil.StringPtr(templateContentsPermDenied), }, diff --git a/command/agent_test.go b/command/agent_test.go index a9cfac5338..1c81ea0fd9 100644 --- a/command/agent_test.go +++ b/command/agent_test.go @@ -319,7 +319,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { 
t.Logf("config: %s", conf) jwtToken, _ := agent.GetTestJWT(t) - if err := ioutil.WriteFile(in, []byte(jwtToken), 0600); err != nil { + if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test jwt", "path", in) @@ -358,7 +358,7 @@ auto_auth { ` config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) - if err := ioutil.WriteFile(conf, []byte(config), 0600); err != nil { + if err := ioutil.WriteFile(conf, []byte(config), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test config", "path", conf) @@ -776,7 +776,7 @@ func TestAgent_Template_Basic(t *testing.T) { var templatePaths []string for i := 0; i < tc.templateCount; i++ { fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := ioutil.WriteFile(fileName, []byte(templateContents(i)), 0600); err != nil { + if err := ioutil.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { t.Fatal(err) } templatePaths = append(templatePaths, fileName) @@ -907,7 +907,7 @@ auto_auth { for i := 0; i < tc.templateCount; i++ { fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := ioutil.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0600); err != nil { + if err := ioutil.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0o600); err != nil { t.Fatal(err) } } @@ -1163,6 +1163,7 @@ var templates = []string{ {{- if .Data.data.password }}"password":"{{ .Data.data.password }}"{{- end }}} {{- end }}`, } + var rendered = []string{ `{"secret": "other","username":"barstuff","password":"zap"}`, `{"secret": "myapp","username":"bar","password":"zap"}`, @@ -1177,6 +1178,7 @@ func templateContents(seed int) string { index := seed % len(templates) return templates[index] } + func templateRendered(seed int) string { index := seed % len(templates) return rendered[index] @@ -1367,7 +1369,7 @@ func TestAgent_Template_Retry(t *testing.T) { // make some template files templatePath := filepath.Join(tmpDir, "render_0.tmpl") - if err := ioutil.WriteFile(templatePath, []byte(templateContents(0)), 0600); err != nil { + if err := ioutil.WriteFile(templatePath, []byte(templateContents(0)), 0o600); err != nil { t.Fatal(err) } templateConfig := fmt.Sprintf(templateConfigString, templatePath, tmpDir, "render_0.json") @@ -1460,7 +1462,6 @@ vault { default: t.Fatalf("%s expectError=%v error=%v code=%d", tcname, tc.expectError, err, code) } - }) } } diff --git a/command/audit_disable.go b/command/audit_disable.go index 1025a0ba27..ddebfcbeda 100644 --- a/command/audit_disable.go +++ b/command/audit_disable.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AuditDisableCommand)(nil) -var _ cli.CommandAutocomplete = (*AuditDisableCommand)(nil) +var ( + _ cli.Command = (*AuditDisableCommand)(nil) + _ cli.CommandAutocomplete = (*AuditDisableCommand)(nil) +) type AuditDisableCommand struct { *BaseCommand diff --git a/command/audit_enable.go b/command/audit_enable.go index 85b3bac9aa..fae4239446 100644 --- a/command/audit_enable.go +++ b/command/audit_enable.go @@ -11,8 +11,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AuditEnableCommand)(nil) -var _ cli.CommandAutocomplete = (*AuditEnableCommand)(nil) +var ( + _ cli.Command = (*AuditEnableCommand)(nil) + _ cli.CommandAutocomplete = (*AuditEnableCommand)(nil) +) type AuditEnableCommand struct { *BaseCommand diff --git a/command/audit_list.go b/command/audit_list.go index 392d55bae2..48cbf5fc89 100644 --- a/command/audit_list.go 
+++ b/command/audit_list.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AuditListCommand)(nil) -var _ cli.CommandAutocomplete = (*AuditListCommand)(nil) +var ( + _ cli.Command = (*AuditListCommand)(nil) + _ cli.CommandAutocomplete = (*AuditListCommand)(nil) +) type AuditListCommand struct { *BaseCommand @@ -114,7 +116,7 @@ func (c *AuditListCommand) Run(args []string) int { func (c *AuditListCommand) simpleAudits(audits map[string]*api.Audit) []string { paths := make([]string, 0, len(audits)) - for path, _ := range audits { + for path := range audits { paths = append(paths, path) } sort.Strings(paths) @@ -134,7 +136,7 @@ func (c *AuditListCommand) simpleAudits(audits map[string]*api.Audit) []string { func (c *AuditListCommand) detailedAudits(audits map[string]*api.Audit) []string { paths := make([]string, 0, len(audits)) - for path, _ := range audits { + for path := range audits { paths = append(paths, path) } sort.Strings(paths) diff --git a/command/auth_disable.go b/command/auth_disable.go index afcfe747df..773486107a 100644 --- a/command/auth_disable.go +++ b/command/auth_disable.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AuthDisableCommand)(nil) -var _ cli.CommandAutocomplete = (*AuthDisableCommand)(nil) +var ( + _ cli.Command = (*AuthDisableCommand)(nil) + _ cli.CommandAutocomplete = (*AuthDisableCommand)(nil) +) type AuthDisableCommand struct { *BaseCommand diff --git a/command/auth_enable.go b/command/auth_enable.go index bc4c52d24d..eb12589c44 100644 --- a/command/auth_enable.go +++ b/command/auth_enable.go @@ -13,8 +13,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AuthEnableCommand)(nil) -var _ cli.CommandAutocomplete = (*AuthEnableCommand)(nil) +var ( + _ cli.Command = (*AuthEnableCommand)(nil) + _ cli.CommandAutocomplete = (*AuthEnableCommand)(nil) +) type AuthEnableCommand struct { *BaseCommand diff --git a/command/auth_help.go b/command/auth_help.go index 6c665fbafa..41ea7be5f5 100644 --- a/command/auth_help.go +++ b/command/auth_help.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AuthHelpCommand)(nil) -var _ cli.CommandAutocomplete = (*AuthHelpCommand)(nil) +var ( + _ cli.Command = (*AuthHelpCommand)(nil) + _ cli.CommandAutocomplete = (*AuthHelpCommand)(nil) +) type AuthHelpCommand struct { *BaseCommand diff --git a/command/auth_list.go b/command/auth_list.go index fcd67dfa9d..5dc29a8282 100644 --- a/command/auth_list.go +++ b/command/auth_list.go @@ -11,8 +11,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AuthListCommand)(nil) -var _ cli.CommandAutocomplete = (*AuthListCommand)(nil) +var ( + _ cli.Command = (*AuthListCommand)(nil) + _ cli.CommandAutocomplete = (*AuthListCommand)(nil) +) type AuthListCommand struct { *BaseCommand diff --git a/command/auth_tune.go b/command/auth_tune.go index cfb44c3048..0094e56c00 100644 --- a/command/auth_tune.go +++ b/command/auth_tune.go @@ -12,8 +12,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*AuthTuneCommand)(nil) -var _ cli.CommandAutocomplete = (*AuthTuneCommand)(nil) +var ( + _ cli.Command = (*AuthTuneCommand)(nil) + _ cli.CommandAutocomplete = (*AuthTuneCommand)(nil) +) type AuthTuneCommand struct { *BaseCommand diff --git a/command/base_flags.go b/command/base_flags.go index 4a9fe2f900..25e08bcc39 100644 --- a/command/base_flags.go +++ b/command/base_flags.go @@ -769,7 +769,7 @@ func (s *stringMapValue) Hidden() bool { return s.hidden } func 
mapToKV(m map[string]string) string { list := make([]string, 0, len(m)) - for k, _ := range m { + for k := range m { list = append(list, k) } sort.Strings(list) diff --git a/command/base_predict.go b/command/base_predict.go index 5775cdfee5..13959bb5bc 100644 --- a/command/base_predict.go +++ b/command/base_predict.go @@ -60,8 +60,10 @@ var defaultPredictVaultMounts = []string{"cubbyhole/"} // doesn't change), and the only way to configure the predict/autocomplete // client is via environment variables. Even if the user specifies a flag, we // can't parse that flag until after the command is submitted. -var predictClient *api.Client -var predictClientOnce sync.Once +var ( + predictClient *api.Client + predictClientOnce sync.Once +) // PredictClient returns the cached API client for the predictor. func PredictClient() *api.Client { diff --git a/command/commands.go b/command/commands.go index 6fb676b64c..36834b8a55 100644 --- a/command/commands.go +++ b/command/commands.go @@ -85,11 +85,11 @@ const ( // flagnameCAPath is the flag used in the base command to read in the CA // cert path. flagNameCAPath = "ca-path" - //flagNameClientCert is the flag used in the base command to read in the - //client key + // flagNameClientCert is the flag used in the base command to read in the + // client key flagNameClientKey = "client-key" - //flagNameClientCert is the flag used in the base command to read in the - //client cert + // flagNameClientCert is the flag used in the base command to read in the + // client cert flagNameClientCert = "client-cert" // flagNameTLSSkipVerify is the flag used in the base command to read in // the option to ignore TLS certificate verification. diff --git a/command/debug.go b/command/debug.go index df8e0ac685..03c3a7e2e6 100644 --- a/command/debug.go +++ b/command/debug.go @@ -73,8 +73,10 @@ type captureError struct { Timestamp time.Time `json:"timestamp"` } -var _ cli.Command = (*DebugCommand)(nil) -var _ cli.CommandAutocomplete = (*DebugCommand)(nil) +var ( + _ cli.Command = (*DebugCommand)(nil) + _ cli.CommandAutocomplete = (*DebugCommand)(nil) +) type DebugCommand struct { *BaseCommand @@ -351,7 +353,7 @@ func (c *DebugCommand) generateIndex() error { } // Write out file - if err := ioutil.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0644); err != nil { + if err := ioutil.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o644); err != nil { return fmt.Errorf("error generating index file; %s", err) } @@ -448,7 +450,7 @@ func (c *DebugCommand) preflight(rawArgs []string) (string, error) { _, err = os.Stat(c.flagOutput) switch { case os.IsNotExist(err): - err := os.MkdirAll(c.flagOutput, 0755) + err := os.MkdirAll(c.flagOutput, 0o755) if err != nil { return "", fmt.Errorf("unable to create output directory: %s", err) } @@ -722,7 +724,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { // Create a sub-directory for pprof data currentDir := currentTimestamp.Format(fileFriendlyTimeFormat) dirName := filepath.Join(c.flagOutput, currentDir) - if err := os.MkdirAll(dirName, 0755); err != nil { + if err := os.MkdirAll(dirName, 0o755); err != nil { c.UI.Error(fmt.Sprintf("Error creating sub-directory for time interval: %s", err)) continue } @@ -739,7 +741,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { return } - err = ioutil.WriteFile(filepath.Join(dirName, "goroutine.prof"), data, 0644) + err = ioutil.WriteFile(filepath.Join(dirName, "goroutine.prof"), data, 0o644) if err != nil { c.captureError("pprof.goroutine", err) } @@ 
-755,7 +757,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { return } - err = ioutil.WriteFile(filepath.Join(dirName, "heap.prof"), data, 0644) + err = ioutil.WriteFile(filepath.Join(dirName, "heap.prof"), data, 0o644) if err != nil { c.captureError("pprof.heap", err) } @@ -779,7 +781,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { return } - err = ioutil.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0644) + err = ioutil.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o644) if err != nil { c.captureError("pprof.profile", err) } @@ -795,7 +797,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { return } - err = ioutil.WriteFile(filepath.Join(dirName, "trace.out"), data, 0644) + err = ioutil.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o644) if err != nil { c.captureError("pprof.trace", err) } @@ -888,7 +890,7 @@ func (c *DebugCommand) persistCollection(collection []map[string]interface{}, ou if err != nil { return err } - if err := ioutil.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0644); err != nil { + if err := ioutil.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o644); err != nil { return err } diff --git a/command/debug_test.go b/command/debug_test.go index 55520aedaa..b7ed934832 100644 --- a/command/debug_test.go +++ b/command/debug_test.go @@ -590,7 +590,7 @@ func TestDebugCommand_OutputExists(t *testing.T) { t.Fatal(err) } } else { - err = os.Mkdir(outputPath, 0755) + err = os.Mkdir(outputPath, 0o755) if err != nil { t.Fatal(err) } diff --git a/command/delete.go b/command/delete.go index 75bbf0aabb..b22a06df61 100644 --- a/command/delete.go +++ b/command/delete.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*DeleteCommand)(nil) -var _ cli.CommandAutocomplete = (*DeleteCommand)(nil) +var ( + _ cli.Command = (*DeleteCommand)(nil) + _ cli.CommandAutocomplete = (*DeleteCommand)(nil) +) type DeleteCommand struct { *BaseCommand diff --git a/command/format_test.go b/command/format_test.go index fafb178e17..d63542a7d7 100644 --- a/command/format_test.go +++ b/command/format_test.go @@ -24,6 +24,7 @@ func (m mockUi) Ask(_ string) (string, error) { m.t.FailNow() return "", nil } + func (m mockUi) AskSecret(_ string) (string, error) { m.t.FailNow() return "", nil diff --git a/command/kv_delete.go b/command/kv_delete.go index 945b4c6758..47d66b7691 100644 --- a/command/kv_delete.go +++ b/command/kv_delete.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVDeleteCommand)(nil) -var _ cli.CommandAutocomplete = (*KVDeleteCommand)(nil) +var ( + _ cli.Command = (*KVDeleteCommand)(nil) + _ cli.CommandAutocomplete = (*KVDeleteCommand)(nil) +) type KVDeleteCommand struct { *BaseCommand diff --git a/command/kv_destroy.go b/command/kv_destroy.go index 64ed284d18..6902460787 100644 --- a/command/kv_destroy.go +++ b/command/kv_destroy.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVDestroyCommand)(nil) -var _ cli.CommandAutocomplete = (*KVDestroyCommand)(nil) +var ( + _ cli.Command = (*KVDestroyCommand)(nil) + _ cli.CommandAutocomplete = (*KVDestroyCommand)(nil) +) type KVDestroyCommand struct { *BaseCommand diff --git a/command/kv_enable_versioning.go b/command/kv_enable_versioning.go index 1d854f1f24..9c2a601432 100644 --- a/command/kv_enable_versioning.go +++ b/command/kv_enable_versioning.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVEnableVersioningCommand)(nil) 
-var _ cli.CommandAutocomplete = (*KVEnableVersioningCommand)(nil) +var ( + _ cli.Command = (*KVEnableVersioningCommand)(nil) + _ cli.CommandAutocomplete = (*KVEnableVersioningCommand)(nil) +) type KVEnableVersioningCommand struct { *BaseCommand diff --git a/command/kv_get.go b/command/kv_get.go index 557ea3179c..e0a519c5d1 100644 --- a/command/kv_get.go +++ b/command/kv_get.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVGetCommand)(nil) -var _ cli.CommandAutocomplete = (*KVGetCommand)(nil) +var ( + _ cli.Command = (*KVGetCommand)(nil) + _ cli.CommandAutocomplete = (*KVGetCommand)(nil) +) type KVGetCommand struct { *BaseCommand diff --git a/command/kv_list.go b/command/kv_list.go index b6386b838f..ab2a8e1801 100644 --- a/command/kv_list.go +++ b/command/kv_list.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVListCommand)(nil) -var _ cli.CommandAutocomplete = (*KVListCommand)(nil) +var ( + _ cli.Command = (*KVListCommand)(nil) + _ cli.CommandAutocomplete = (*KVListCommand)(nil) +) type KVListCommand struct { *BaseCommand @@ -75,7 +77,7 @@ func (c *KVListCommand) Run(args []string) int { // Append trailing slash path := args[0] - if !strings.HasSuffix(path , "/") { + if !strings.HasSuffix(path, "/") { path += "/" } diff --git a/command/kv_metadata_delete.go b/command/kv_metadata_delete.go index 8ff3a5655e..03eb70d126 100644 --- a/command/kv_metadata_delete.go +++ b/command/kv_metadata_delete.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVMetadataDeleteCommand)(nil) -var _ cli.CommandAutocomplete = (*KVMetadataDeleteCommand)(nil) +var ( + _ cli.Command = (*KVMetadataDeleteCommand)(nil) + _ cli.CommandAutocomplete = (*KVMetadataDeleteCommand)(nil) +) type KVMetadataDeleteCommand struct { *BaseCommand diff --git a/command/kv_metadata_get.go b/command/kv_metadata_get.go index a9bfc77eaa..f5be942c36 100644 --- a/command/kv_metadata_get.go +++ b/command/kv_metadata_get.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVMetadataGetCommand)(nil) -var _ cli.CommandAutocomplete = (*KVMetadataGetCommand)(nil) +var ( + _ cli.Command = (*KVMetadataGetCommand)(nil) + _ cli.CommandAutocomplete = (*KVMetadataGetCommand)(nil) +) type KVMetadataGetCommand struct { *BaseCommand diff --git a/command/kv_metadata_put.go b/command/kv_metadata_put.go index 20cd9579af..69529d066c 100644 --- a/command/kv_metadata_put.go +++ b/command/kv_metadata_put.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVMetadataPutCommand)(nil) -var _ cli.CommandAutocomplete = (*KVMetadataPutCommand)(nil) +var ( + _ cli.Command = (*KVMetadataPutCommand)(nil) + _ cli.CommandAutocomplete = (*KVMetadataPutCommand)(nil) +) type KVMetadataPutCommand struct { *BaseCommand diff --git a/command/kv_patch.go b/command/kv_patch.go index 5c6dfde254..73a5e42a62 100644 --- a/command/kv_patch.go +++ b/command/kv_patch.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVPatchCommand)(nil) -var _ cli.CommandAutocomplete = (*KVPatchCommand)(nil) +var ( + _ cli.Command = (*KVPatchCommand)(nil) + _ cli.CommandAutocomplete = (*KVPatchCommand)(nil) +) type KVPatchCommand struct { *BaseCommand diff --git a/command/kv_put.go b/command/kv_put.go index b4ba76aafa..a33b67f86f 100644 --- a/command/kv_put.go +++ b/command/kv_put.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVPutCommand)(nil) -var _ 
cli.CommandAutocomplete = (*KVPutCommand)(nil) +var ( + _ cli.Command = (*KVPutCommand)(nil) + _ cli.CommandAutocomplete = (*KVPutCommand)(nil) +) type KVPutCommand struct { *BaseCommand diff --git a/command/kv_rollback.go b/command/kv_rollback.go index e050aaeb37..42bd63faa8 100644 --- a/command/kv_rollback.go +++ b/command/kv_rollback.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVRollbackCommand)(nil) -var _ cli.CommandAutocomplete = (*KVRollbackCommand)(nil) +var ( + _ cli.Command = (*KVRollbackCommand)(nil) + _ cli.CommandAutocomplete = (*KVRollbackCommand)(nil) +) type KVRollbackCommand struct { *BaseCommand diff --git a/command/kv_test.go b/command/kv_test.go index 5e180cbb27..83af43a4dd 100644 --- a/command/kv_test.go +++ b/command/kv_test.go @@ -174,7 +174,6 @@ func TestKVPutCommand(t *testing.T) { if !strings.Contains(combined, "check-and-set parameter did not match the current version") { t.Errorf("expected %q to contain %q", combined, "check-and-set parameter did not match the current version") } - }) t.Run("v1_data", func(t *testing.T) { diff --git a/command/kv_undelete.go b/command/kv_undelete.go index 7c11e8ce5d..5c86d5df47 100644 --- a/command/kv_undelete.go +++ b/command/kv_undelete.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*KVUndeleteCommand)(nil) -var _ cli.CommandAutocomplete = (*KVUndeleteCommand)(nil) +var ( + _ cli.Command = (*KVUndeleteCommand)(nil) + _ cli.CommandAutocomplete = (*KVUndeleteCommand)(nil) +) type KVUndeleteCommand struct { *BaseCommand diff --git a/command/lease_lookup.go b/command/lease_lookup.go index 4d5aa6da33..c72c6a174f 100644 --- a/command/lease_lookup.go +++ b/command/lease_lookup.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*LeaseLookupCommand)(nil) -var _ cli.CommandAutocomplete = (*LeaseLookupCommand)(nil) +var ( + _ cli.Command = (*LeaseLookupCommand)(nil) + _ cli.CommandAutocomplete = (*LeaseLookupCommand)(nil) +) type LeaseLookupCommand struct { *BaseCommand diff --git a/command/lease_renew.go b/command/lease_renew.go index a13d913221..13eb95ed00 100644 --- a/command/lease_renew.go +++ b/command/lease_renew.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*LeaseRenewCommand)(nil) -var _ cli.CommandAutocomplete = (*LeaseRenewCommand)(nil) +var ( + _ cli.Command = (*LeaseRenewCommand)(nil) + _ cli.CommandAutocomplete = (*LeaseRenewCommand)(nil) +) type LeaseRenewCommand struct { *BaseCommand diff --git a/command/lease_revoke.go b/command/lease_revoke.go index 6077f053b7..1fc90eff7c 100644 --- a/command/lease_revoke.go +++ b/command/lease_revoke.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*LeaseRevokeCommand)(nil) -var _ cli.CommandAutocomplete = (*LeaseRevokeCommand)(nil) +var ( + _ cli.Command = (*LeaseRevokeCommand)(nil) + _ cli.CommandAutocomplete = (*LeaseRevokeCommand)(nil) +) type LeaseRevokeCommand struct { *BaseCommand diff --git a/command/list.go b/command/list.go index a1d074f5ac..b6d3fabe32 100644 --- a/command/list.go +++ b/command/list.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*ListCommand)(nil) -var _ cli.CommandAutocomplete = (*ListCommand)(nil) +var ( + _ cli.Command = (*ListCommand)(nil) + _ cli.CommandAutocomplete = (*ListCommand)(nil) +) type ListCommand struct { *BaseCommand @@ -77,7 +79,7 @@ func (c *ListCommand) Run(args []string) int { // Append trailing slash path := args[0] - if 
!strings.HasSuffix(path , "/") { + if !strings.HasSuffix(path, "/") { path += "/" } diff --git a/command/monitor.go b/command/monitor.go index d78b1ffbec..ec84666de6 100644 --- a/command/monitor.go +++ b/command/monitor.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*MonitorCommand)(nil) -var _ cli.CommandAutocomplete = (*MonitorCommand)(nil) +var ( + _ cli.Command = (*MonitorCommand)(nil) + _ cli.CommandAutocomplete = (*MonitorCommand)(nil) +) type MonitorCommand struct { *BaseCommand diff --git a/command/namespace_create.go b/command/namespace_create.go index 9937ee0489..80ce589f96 100644 --- a/command/namespace_create.go +++ b/command/namespace_create.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*NamespaceCreateCommand)(nil) -var _ cli.CommandAutocomplete = (*NamespaceCreateCommand)(nil) +var ( + _ cli.Command = (*NamespaceCreateCommand)(nil) + _ cli.CommandAutocomplete = (*NamespaceCreateCommand)(nil) +) type NamespaceCreateCommand struct { *BaseCommand diff --git a/command/namespace_delete.go b/command/namespace_delete.go index 7061b64602..a5d1892936 100644 --- a/command/namespace_delete.go +++ b/command/namespace_delete.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*NamespaceDeleteCommand)(nil) -var _ cli.CommandAutocomplete = (*NamespaceDeleteCommand)(nil) +var ( + _ cli.Command = (*NamespaceDeleteCommand)(nil) + _ cli.CommandAutocomplete = (*NamespaceDeleteCommand)(nil) +) type NamespaceDeleteCommand struct { *BaseCommand diff --git a/command/namespace_list.go b/command/namespace_list.go index e5352f5077..0b05f502d2 100644 --- a/command/namespace_list.go +++ b/command/namespace_list.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*NamespaceListCommand)(nil) -var _ cli.CommandAutocomplete = (*NamespaceListCommand)(nil) +var ( + _ cli.Command = (*NamespaceListCommand)(nil) + _ cli.CommandAutocomplete = (*NamespaceListCommand)(nil) +) type NamespaceListCommand struct { *BaseCommand diff --git a/command/namespace_lookup.go b/command/namespace_lookup.go index 718d0452da..98d710ea53 100644 --- a/command/namespace_lookup.go +++ b/command/namespace_lookup.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*NamespaceLookupCommand)(nil) -var _ cli.CommandAutocomplete = (*NamespaceLookupCommand)(nil) +var ( + _ cli.Command = (*NamespaceLookupCommand)(nil) + _ cli.CommandAutocomplete = (*NamespaceLookupCommand)(nil) +) type NamespaceLookupCommand struct { *BaseCommand diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go index 8368d6ef6e..96d47e41cd 100644 --- a/command/operator_diagnose.go +++ b/command/operator_diagnose.go @@ -15,8 +15,10 @@ import ( const OperatorDiagnoseEnableEnv = "VAULT_DIAGNOSE" -var _ cli.Command = (*OperatorDiagnoseCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorDiagnoseCommand)(nil) +var ( + _ cli.Command = (*OperatorDiagnoseCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorDiagnoseCommand)(nil) +) type OperatorDiagnoseCommand struct { *BaseCommand @@ -98,11 +100,13 @@ func (c *OperatorDiagnoseCommand) AutocompleteFlags() complete.Flags { return c.Flags().Completions() } -const status_unknown = "[ ] " -const status_ok = "\u001b[32m[ ok ]\u001b[0m " -const status_failed = "\u001b[31m[failed]\u001b[0m " -const status_warn = "\u001b[33m[ warn ]\u001b[0m " -const same_line = "\u001b[F" +const ( + status_unknown = "[ ] " + status_ok = "\u001b[32m[ ok ]\u001b[0m " 
+ status_failed = "\u001b[31m[failed]\u001b[0m " + status_warn = "\u001b[33m[ warn ]\u001b[0m " + same_line = "\u001b[F" +) func (c *OperatorDiagnoseCommand) Run(args []string) int { f := c.Flags() diff --git a/command/operator_generate_root.go b/command/operator_generate_root.go index c807613cb2..a9f9117feb 100644 --- a/command/operator_generate_root.go +++ b/command/operator_generate_root.go @@ -20,8 +20,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorGenerateRootCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorGenerateRootCommand)(nil) +var ( + _ cli.Command = (*OperatorGenerateRootCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorGenerateRootCommand)(nil) +) type generateRootKind int diff --git a/command/operator_init.go b/command/operator_init.go index 09b4748e5e..92b39b879f 100644 --- a/command/operator_init.go +++ b/command/operator_init.go @@ -14,8 +14,10 @@ import ( consulapi "github.com/hashicorp/consul/api" ) -var _ cli.Command = (*OperatorInitCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorInitCommand)(nil) +var ( + _ cli.Command = (*OperatorInitCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorInitCommand)(nil) +) type OperatorInitCommand struct { *BaseCommand diff --git a/command/operator_key_status.go b/command/operator_key_status.go index c5b61c8c0a..e015fb0e32 100644 --- a/command/operator_key_status.go +++ b/command/operator_key_status.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorKeyStatusCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorKeyStatusCommand)(nil) +var ( + _ cli.Command = (*OperatorKeyStatusCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorKeyStatusCommand)(nil) +) type OperatorKeyStatusCommand struct { *BaseCommand diff --git a/command/operator_migrate.go b/command/operator_migrate.go index 9ece5914e8..c243864e4c 100644 --- a/command/operator_migrate.go +++ b/command/operator_migrate.go @@ -24,8 +24,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorMigrateCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorMigrateCommand)(nil) +var ( + _ cli.Command = (*OperatorMigrateCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorMigrateCommand)(nil) +) var errAbort = errors.New("Migration aborted") @@ -212,7 +214,6 @@ func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.B } entry, err := from.Get(ctx, path) - if err != nil { return errwrap.Wrapf("error reading entry: {{err}}", err) } diff --git a/command/operator_migrate_test.go b/command/operator_migrate_test.go index 8b30d3e118..0d8a4454ad 100644 --- a/command/operator_migrate_test.go +++ b/command/operator_migrate_test.go @@ -119,7 +119,7 @@ storage_source "src_type" { storage_destination "dest_type" { path = "dest_path" -}`), 0644) +}`), 0o644) defer os.Remove(cfgName) expCfg := &migratorConfig{ @@ -145,7 +145,7 @@ storage_destination "dest_type" { } verifyBad := func(cfg string) { - ioutil.WriteFile(cfgName, []byte(cfg), 0644) + ioutil.WriteFile(cfgName, []byte(cfg), 0o644) _, err := cmd.loadMigratorConfig(cfgName) if err == nil { t.Fatalf("expected error but none received from: %v", cfg) @@ -191,7 +191,6 @@ storage_destination "dest_type" { storage_destination "dest_type2" { path = "dest_path" }`) - }) t.Run("DFS Scan", func(t *testing.T) { s, _ := physicalBackends["inmem"](map[string]string{}, nil) diff --git a/command/operator_raft_autopilot_get_config.go b/command/operator_raft_autopilot_get_config.go index e4b2d95f6c..f0a30e1e1f 100644 
--- a/command/operator_raft_autopilot_get_config.go +++ b/command/operator_raft_autopilot_get_config.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRaftAutopilotGetConfigCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRaftAutopilotGetConfigCommand)(nil) +var ( + _ cli.Command = (*OperatorRaftAutopilotGetConfigCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftAutopilotGetConfigCommand)(nil) +) type OperatorRaftAutopilotGetConfigCommand struct { *BaseCommand diff --git a/command/operator_raft_autopilot_set_config.go b/command/operator_raft_autopilot_set_config.go index 3344ece176..c8e1366281 100644 --- a/command/operator_raft_autopilot_set_config.go +++ b/command/operator_raft_autopilot_set_config.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRaftAutopilotSetConfigCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRaftAutopilotSetConfigCommand)(nil) +var ( + _ cli.Command = (*OperatorRaftAutopilotSetConfigCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftAutopilotSetConfigCommand)(nil) +) type OperatorRaftAutopilotSetConfigCommand struct { *BaseCommand diff --git a/command/operator_raft_autopilot_state.go b/command/operator_raft_autopilot_state.go index ba976caa99..8a530dc75e 100644 --- a/command/operator_raft_autopilot_state.go +++ b/command/operator_raft_autopilot_state.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRaftAutopilotStateCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRaftAutopilotStateCommand)(nil) +var ( + _ cli.Command = (*OperatorRaftAutopilotStateCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftAutopilotStateCommand)(nil) +) type OperatorRaftAutopilotStateCommand struct { *BaseCommand diff --git a/command/operator_raft_join.go b/command/operator_raft_join.go index 52f0c7d8e2..37bc77eedb 100644 --- a/command/operator_raft_join.go +++ b/command/operator_raft_join.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRaftJoinCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRaftJoinCommand)(nil) +var ( + _ cli.Command = (*OperatorRaftJoinCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftJoinCommand)(nil) +) type OperatorRaftJoinCommand struct { flagRetry bool diff --git a/command/operator_raft_listpeers.go b/command/operator_raft_listpeers.go index 0d5379018a..2c80112ec3 100644 --- a/command/operator_raft_listpeers.go +++ b/command/operator_raft_listpeers.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRaftListPeersCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRaftListPeersCommand)(nil) +var ( + _ cli.Command = (*OperatorRaftListPeersCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftListPeersCommand)(nil) +) type OperatorRaftListPeersCommand struct { *BaseCommand diff --git a/command/operator_raft_remove_peer.go b/command/operator_raft_remove_peer.go index 7018bae20a..6f7e837474 100644 --- a/command/operator_raft_remove_peer.go +++ b/command/operator_raft_remove_peer.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRaftRemovePeerCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRaftRemovePeerCommand)(nil) +var ( + _ cli.Command = (*OperatorRaftRemovePeerCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftRemovePeerCommand)(nil) +) type OperatorRaftRemovePeerCommand struct { *BaseCommand diff --git 
a/command/operator_raft_snapshot_restore.go b/command/operator_raft_snapshot_restore.go index bf5c6eb5b1..3755d6cbfd 100644 --- a/command/operator_raft_snapshot_restore.go +++ b/command/operator_raft_snapshot_restore.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRaftSnapshotRestoreCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRaftSnapshotRestoreCommand)(nil) +var ( + _ cli.Command = (*OperatorRaftSnapshotRestoreCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftSnapshotRestoreCommand)(nil) +) type OperatorRaftSnapshotRestoreCommand struct { flagForce bool diff --git a/command/operator_raft_snapshot_save.go b/command/operator_raft_snapshot_save.go index ebe660abe6..825bb303a1 100644 --- a/command/operator_raft_snapshot_save.go +++ b/command/operator_raft_snapshot_save.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRaftSnapshotSaveCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRaftSnapshotSaveCommand)(nil) +var ( + _ cli.Command = (*OperatorRaftSnapshotSaveCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftSnapshotSaveCommand)(nil) +) type OperatorRaftSnapshotSaveCommand struct { *BaseCommand @@ -74,7 +76,7 @@ func (c *OperatorRaftSnapshotSaveCommand) Run(args []string) int { w := &lazyOpenWriter{ openFunc: func() (io.WriteCloser, error) { - return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) }, } @@ -91,7 +93,7 @@ func (c *OperatorRaftSnapshotSaveCommand) Run(args []string) int { c.UI.Error(fmt.Sprintf("Error taking the snapshot: %s", err)) return 2 } - + err = w.Close() if err != nil { c.UI.Error(fmt.Sprintf("Error taking the snapshot: %s", err)) @@ -102,10 +104,9 @@ func (c *OperatorRaftSnapshotSaveCommand) Run(args []string) int { type lazyOpenWriter struct { openFunc func() (io.WriteCloser, error) - writer io.WriteCloser + writer io.WriteCloser } - func (l *lazyOpenWriter) Write(p []byte) (n int, err error) { if l.writer == nil { var err error @@ -122,4 +123,4 @@ func (l *lazyOpenWriter) Close() error { return l.writer.Close() } return nil -} \ No newline at end of file +} diff --git a/command/operator_rekey.go b/command/operator_rekey.go index d437a92591..630219bb5d 100644 --- a/command/operator_rekey.go +++ b/command/operator_rekey.go @@ -15,8 +15,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRekeyCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRekeyCommand)(nil) +var ( + _ cli.Command = (*OperatorRekeyCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRekeyCommand)(nil) +) type OperatorRekeyCommand struct { *BaseCommand diff --git a/command/operator_seal.go b/command/operator_seal.go index 90161e241c..9f2ec6656e 100644 --- a/command/operator_seal.go +++ b/command/operator_seal.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorSealCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorSealCommand)(nil) +var ( + _ cli.Command = (*OperatorSealCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorSealCommand)(nil) +) type OperatorSealCommand struct { *BaseCommand diff --git a/command/operator_step_down.go b/command/operator_step_down.go index 63208faf04..dea2c97178 100644 --- a/command/operator_step_down.go +++ b/command/operator_step_down.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorStepDownCommand)(nil) -var _ cli.CommandAutocomplete = 
(*OperatorStepDownCommand)(nil) +var ( + _ cli.Command = (*OperatorStepDownCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorStepDownCommand)(nil) +) type OperatorStepDownCommand struct { *BaseCommand diff --git a/command/operator_unseal.go b/command/operator_unseal.go index c106c1e166..50052a690a 100644 --- a/command/operator_unseal.go +++ b/command/operator_unseal.go @@ -12,8 +12,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorUnsealCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorUnsealCommand)(nil) +var ( + _ cli.Command = (*OperatorUnsealCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorUnsealCommand)(nil) +) type OperatorUnsealCommand struct { *BaseCommand diff --git a/command/operator_usage.go b/command/operator_usage.go index 93bc5d061f..a6b4b59ccc 100644 --- a/command/operator_usage.go +++ b/command/operator_usage.go @@ -14,8 +14,10 @@ import ( "github.com/ryanuber/columnize" ) -var _ cli.Command = (*OperatorUsageCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorUsageCommand)(nil) +var ( + _ cli.Command = (*OperatorUsageCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorUsageCommand)(nil) +) type OperatorUsageCommand struct { *BaseCommand @@ -240,7 +242,6 @@ func (c *OperatorUsageCommand) parseNamespaceCount(rawVal interface{}) (UsageRes } return ret, nil - } func (c *OperatorUsageCommand) namespacesOutput(data map[string]interface{}) []string { diff --git a/command/path_help.go b/command/path_help.go index d50fe8eab4..1f540a5c6a 100644 --- a/command/path_help.go +++ b/command/path_help.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PathHelpCommand)(nil) -var _ cli.CommandAutocomplete = (*PathHelpCommand)(nil) +var ( + _ cli.Command = (*PathHelpCommand)(nil) + _ cli.CommandAutocomplete = (*PathHelpCommand)(nil) +) var pathHelpVaultSealedMessage = strings.TrimSpace(` Error: Vault is sealed. 
diff --git a/command/pgp_test.go b/command/pgp_test.go index 4cfda985b7..ac39d02887 100644 --- a/command/pgp_test.go +++ b/command/pgp_test.go @@ -34,7 +34,7 @@ func getPubKeyFiles(t *testing.T) (string, []string, error) { if err != nil { t.Fatalf("Error decoding bytes for public key 1: %s", err) } - err = ioutil.WriteFile(pubFiles[0], pub1Bytes, 0755) + err = ioutil.WriteFile(pubFiles[0], pub1Bytes, 0o755) if err != nil { t.Fatalf("Error writing pub key 1 to temp file: %s", err) } @@ -42,7 +42,7 @@ func getPubKeyFiles(t *testing.T) (string, []string, error) { if err != nil { t.Fatalf("Error decoding bytes for public key 2: %s", err) } - err = ioutil.WriteFile(pubFiles[1], pub2Bytes, 0755) + err = ioutil.WriteFile(pubFiles[1], pub2Bytes, 0o755) if err != nil { t.Fatalf("Error writing pub key 2 to temp file: %s", err) } @@ -50,11 +50,11 @@ func getPubKeyFiles(t *testing.T) (string, []string, error) { if err != nil { t.Fatalf("Error decoding bytes for public key 3: %s", err) } - err = ioutil.WriteFile(pubFiles[2], pub3Bytes, 0755) + err = ioutil.WriteFile(pubFiles[2], pub3Bytes, 0o755) if err != nil { t.Fatalf("Error writing pub key 3 to temp file: %s", err) } - err = ioutil.WriteFile(pubFiles[3], []byte(pgpkeys.TestAAPubKey1), 0755) + err = ioutil.WriteFile(pubFiles[3], []byte(pgpkeys.TestAAPubKey1), 0o755) if err != nil { t.Fatalf("Error writing aa pub key 1 to temp file: %s", err) } diff --git a/command/plugin_deregister.go b/command/plugin_deregister.go index 7c5e4a16e8..7f0c4a614b 100644 --- a/command/plugin_deregister.go +++ b/command/plugin_deregister.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PluginDeregisterCommand)(nil) -var _ cli.CommandAutocomplete = (*PluginDeregisterCommand)(nil) +var ( + _ cli.Command = (*PluginDeregisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginDeregisterCommand)(nil) +) type PluginDeregisterCommand struct { *BaseCommand diff --git a/command/plugin_info.go b/command/plugin_info.go index bed47e3184..a5676e7304 100644 --- a/command/plugin_info.go +++ b/command/plugin_info.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PluginInfoCommand)(nil) -var _ cli.CommandAutocomplete = (*PluginInfoCommand)(nil) +var ( + _ cli.Command = (*PluginInfoCommand)(nil) + _ cli.CommandAutocomplete = (*PluginInfoCommand)(nil) +) type PluginInfoCommand struct { *BaseCommand diff --git a/command/plugin_list.go b/command/plugin_list.go index 837a165c5b..40cf5a8fd8 100644 --- a/command/plugin_list.go +++ b/command/plugin_list.go @@ -11,8 +11,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PluginListCommand)(nil) -var _ cli.CommandAutocomplete = (*PluginListCommand)(nil) +var ( + _ cli.Command = (*PluginListCommand)(nil) + _ cli.CommandAutocomplete = (*PluginListCommand)(nil) +) type PluginListCommand struct { *BaseCommand diff --git a/command/plugin_register.go b/command/plugin_register.go index f53562bb6b..4a1eb19a5b 100644 --- a/command/plugin_register.go +++ b/command/plugin_register.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PluginRegisterCommand)(nil) -var _ cli.CommandAutocomplete = (*PluginRegisterCommand)(nil) +var ( + _ cli.Command = (*PluginRegisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRegisterCommand)(nil) +) type PluginRegisterCommand struct { *BaseCommand diff --git a/command/plugin_reload.go b/command/plugin_reload.go index 06f7a50328..ae3c663869 100644 --- a/command/plugin_reload.go +++ 
b/command/plugin_reload.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PluginReloadCommand)(nil) -var _ cli.CommandAutocomplete = (*PluginReloadCommand)(nil) +var ( + _ cli.Command = (*PluginReloadCommand)(nil) + _ cli.CommandAutocomplete = (*PluginReloadCommand)(nil) +) type PluginReloadCommand struct { *BaseCommand diff --git a/command/plugin_reload_status.go b/command/plugin_reload_status.go index 4579b5e897..319d539c15 100644 --- a/command/plugin_reload_status.go +++ b/command/plugin_reload_status.go @@ -2,14 +2,17 @@ package command import ( "fmt" + "strings" + "github.com/hashicorp/vault/api" "github.com/mitchellh/cli" "github.com/posener/complete" - "strings" ) -var _ cli.Command = (*PluginReloadCommand)(nil) -var _ cli.CommandAutocomplete = (*PluginReloadCommand)(nil) +var ( + _ cli.Command = (*PluginReloadCommand)(nil) + _ cli.CommandAutocomplete = (*PluginReloadCommand)(nil) +) type PluginReloadStatusCommand struct { *BaseCommand @@ -73,7 +76,6 @@ func (c *PluginReloadStatusCommand) Run(args []string) int { r, err := client.Sys().ReloadPluginStatus(&api.ReloadPluginStatusInput{ ReloadID: reloadId, }) - if err != nil { c.UI.Error(fmt.Sprintf("Error retrieving plugin reload status: %s", err)) return 2 diff --git a/command/plugin_reload_test.go b/command/plugin_reload_test.go index 1f7191a5c1..99b0c03c7f 100644 --- a/command/plugin_reload_test.go +++ b/command/plugin_reload_test.go @@ -115,9 +115,7 @@ func TestPluginReloadCommand_Run(t *testing.T) { if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) } - }) - } func TestPluginReloadStatusCommand_Run(t *testing.T) { diff --git a/command/plugin_test.go b/command/plugin_test.go index e45ea69335..786abdb52f 100644 --- a/command/plugin_test.go +++ b/command/plugin_test.go @@ -42,7 +42,7 @@ func testPluginCreate(tb testing.TB, dir, name string) (string, string) { tb.Helper() pth := dir + "/" + name - if err := ioutil.WriteFile(pth, nil, 0755); err != nil { + if err := ioutil.WriteFile(pth, nil, 0o755); err != nil { tb.Fatal(err) } diff --git a/command/policy_delete.go b/command/policy_delete.go index e74030640f..76fa9a21d0 100644 --- a/command/policy_delete.go +++ b/command/policy_delete.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PolicyDeleteCommand)(nil) -var _ cli.CommandAutocomplete = (*PolicyDeleteCommand)(nil) +var ( + _ cli.Command = (*PolicyDeleteCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyDeleteCommand)(nil) +) type PolicyDeleteCommand struct { *BaseCommand diff --git a/command/policy_fmt.go b/command/policy_fmt.go index e9e7669032..7912c10643 100644 --- a/command/policy_fmt.go +++ b/command/policy_fmt.go @@ -13,8 +13,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PolicyFmtCommand)(nil) -var _ cli.CommandAutocomplete = (*PolicyFmtCommand)(nil) +var ( + _ cli.Command = (*PolicyFmtCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyFmtCommand)(nil) +) type PolicyFmtCommand struct { *BaseCommand @@ -101,7 +103,7 @@ func (c *PolicyFmtCommand) Run(args []string) int { } // Write them back out - if err := ioutil.WriteFile(path, result, 0644); err != nil { + if err := ioutil.WriteFile(path, result, 0o644); err != nil { c.UI.Error(fmt.Sprintf("Error writing result: %s", err)) return 1 } diff --git a/command/policy_list.go b/command/policy_list.go index 43bd5287da..53e85df0fd 100644 --- a/command/policy_list.go +++ b/command/policy_list.go @@ -8,8 +8,10 @@ import ( 
"github.com/posener/complete" ) -var _ cli.Command = (*PolicyListCommand)(nil) -var _ cli.CommandAutocomplete = (*PolicyListCommand)(nil) +var ( + _ cli.Command = (*PolicyListCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyListCommand)(nil) +) type PolicyListCommand struct { *BaseCommand diff --git a/command/policy_read.go b/command/policy_read.go index d10a5d1027..31777c5d5a 100644 --- a/command/policy_read.go +++ b/command/policy_read.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PolicyReadCommand)(nil) -var _ cli.CommandAutocomplete = (*PolicyReadCommand)(nil) +var ( + _ cli.Command = (*PolicyReadCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyReadCommand)(nil) +) type PolicyReadCommand struct { *BaseCommand diff --git a/command/policy_write.go b/command/policy_write.go index 9f6cb2222d..50a1ccf4cc 100644 --- a/command/policy_write.go +++ b/command/policy_write.go @@ -11,8 +11,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PolicyWriteCommand)(nil) -var _ cli.CommandAutocomplete = (*PolicyWriteCommand)(nil) +var ( + _ cli.Command = (*PolicyWriteCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyWriteCommand)(nil) +) type PolicyWriteCommand struct { *BaseCommand diff --git a/command/print.go b/command/print.go index 0ec6061a5a..dace6ac951 100644 --- a/command/print.go +++ b/command/print.go @@ -7,8 +7,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PrintCommand)(nil) -var _ cli.CommandAutocomplete = (*PrintCommand)(nil) +var ( + _ cli.Command = (*PrintCommand)(nil) + _ cli.CommandAutocomplete = (*PrintCommand)(nil) +) type PrintCommand struct { *BaseCommand diff --git a/command/print_token.go b/command/print_token.go index 1a6a8ef154..efe5aeedd3 100644 --- a/command/print_token.go +++ b/command/print_token.go @@ -7,8 +7,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*PrintTokenCommand)(nil) -var _ cli.CommandAutocomplete = (*PrintTokenCommand)(nil) +var ( + _ cli.Command = (*PrintTokenCommand)(nil) + _ cli.CommandAutocomplete = (*PrintTokenCommand)(nil) +) type PrintTokenCommand struct { *BaseCommand diff --git a/command/read.go b/command/read.go index 0b36608387..b12eb3f60a 100644 --- a/command/read.go +++ b/command/read.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*ReadCommand)(nil) -var _ cli.CommandAutocomplete = (*ReadCommand)(nil) +var ( + _ cli.Command = (*ReadCommand)(nil) + _ cli.CommandAutocomplete = (*ReadCommand)(nil) +) type ReadCommand struct { *BaseCommand diff --git a/command/rotate.go b/command/rotate.go index d0f0a2db41..f366a6133b 100644 --- a/command/rotate.go +++ b/command/rotate.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*OperatorRotateCommand)(nil) -var _ cli.CommandAutocomplete = (*OperatorRotateCommand)(nil) +var ( + _ cli.Command = (*OperatorRotateCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRotateCommand)(nil) +) type OperatorRotateCommand struct { *BaseCommand diff --git a/command/secrets_disable.go b/command/secrets_disable.go index 7002874409..47a61c5fe0 100644 --- a/command/secrets_disable.go +++ b/command/secrets_disable.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*SecretsDisableCommand)(nil) -var _ cli.CommandAutocomplete = (*SecretsDisableCommand)(nil) +var ( + _ cli.Command = (*SecretsDisableCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsDisableCommand)(nil) +) type SecretsDisableCommand struct { *BaseCommand 
diff --git a/command/secrets_enable.go b/command/secrets_enable.go index 48a7f65ef4..cb4671ba39 100644 --- a/command/secrets_enable.go +++ b/command/secrets_enable.go @@ -13,8 +13,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*SecretsEnableCommand)(nil) -var _ cli.CommandAutocomplete = (*SecretsEnableCommand)(nil) +var ( + _ cli.Command = (*SecretsEnableCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsEnableCommand)(nil) +) type SecretsEnableCommand struct { *BaseCommand diff --git a/command/secrets_enable_test.go b/command/secrets_enable_test.go index 48dc923254..a8b4084427 100644 --- a/command/secrets_enable_test.go +++ b/command/secrets_enable_test.go @@ -10,11 +10,9 @@ import ( "github.com/mitchellh/cli" ) -var ( - // logicalBackendAdjustmentFactor is set to 1 for the database backend - // which is a plugin but not found in go.mod files - logicalBackendAdjustmentFactor = 1 -) +// logicalBackendAdjustmentFactor is set to 1 for the database backend +// which is a plugin but not found in go.mod files +var logicalBackendAdjustmentFactor = 1 func testSecretsEnableCommand(tb testing.TB) (*cli.MockUi, *SecretsEnableCommand) { tb.Helper() diff --git a/command/secrets_list.go b/command/secrets_list.go index 096f0703d3..e9ce1ff31e 100644 --- a/command/secrets_list.go +++ b/command/secrets_list.go @@ -11,8 +11,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*SecretsListCommand)(nil) -var _ cli.CommandAutocomplete = (*SecretsListCommand)(nil) +var ( + _ cli.Command = (*SecretsListCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsListCommand)(nil) +) type SecretsListCommand struct { *BaseCommand diff --git a/command/secrets_move.go b/command/secrets_move.go index a8f604bee0..a04ec090b3 100644 --- a/command/secrets_move.go +++ b/command/secrets_move.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*SecretsMoveCommand)(nil) -var _ cli.CommandAutocomplete = (*SecretsMoveCommand)(nil) +var ( + _ cli.Command = (*SecretsMoveCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsMoveCommand)(nil) +) type SecretsMoveCommand struct { *BaseCommand diff --git a/command/secrets_tune.go b/command/secrets_tune.go index 3465b4995b..c768c7ea88 100644 --- a/command/secrets_tune.go +++ b/command/secrets_tune.go @@ -12,8 +12,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*SecretsTuneCommand)(nil) -var _ cli.CommandAutocomplete = (*SecretsTuneCommand)(nil) +var ( + _ cli.Command = (*SecretsTuneCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsTuneCommand)(nil) +) type SecretsTuneCommand struct { *BaseCommand diff --git a/command/server.go b/command/server.go index 784a53e9ab..1a5fc82403 100644 --- a/command/server.go +++ b/command/server.go @@ -54,8 +54,10 @@ import ( "google.golang.org/grpc/grpclog" ) -var _ cli.Command = (*ServerCommand)(nil) -var _ cli.CommandAutocomplete = (*ServerCommand)(nil) +var ( + _ cli.Command = (*ServerCommand)(nil) + _ cli.CommandAutocomplete = (*ServerCommand)(nil) +) var memProfilerEnabled = false @@ -1227,7 +1229,7 @@ func (c *ServerCommand) Run(args []string) int { }), }) var sealInfoKeys []string - var sealInfoMap = map[string]string{} + sealInfoMap := map[string]string{} wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger) if sealConfigError != nil { if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { @@ -1244,7 +1246,7 @@ func (c *ServerCommand) Run(args []string) int { }) } - var infoPrefix = "" + 
infoPrefix := "" if configSeal.Disabled { unwrapSeal = seal infoPrefix = "Old " @@ -1793,7 +1795,8 @@ CLUSTER_SYNTHESIS_COMPLETE: "Development mode should NOT be used in production installations!")) c.UI.Warn("") }) - })} + }), + } c.logger.RegisterSink(qw) } @@ -2244,7 +2247,7 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m return 1 } - if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0755); err != nil { + if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0o755); err != nil { c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err)) return 1 } @@ -2476,7 +2479,7 @@ func (c *ServerCommand) storePidFile(pidPath string) error { } // Open the PID file - pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) if err != nil { return errwrap.Wrapf("could not open pid file: {{err}}", err) } @@ -2539,7 +2542,6 @@ type StorageMigrationStatus struct { func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error) { entry, err := b.Get(context.Background(), storageMigrationLock) - if err != nil { return nil, err } diff --git a/command/server/config_test_helpers.go b/command/server/config_test_helpers.go index f548d32d0f..e514090228 100644 --- a/command/server/config_test_helpers.go +++ b/command/server/config_test_helpers.go @@ -247,7 +247,7 @@ func testLoadConfigFile_json2(t *testing.T, entropy *configutil.Entropy) { } func testParseEntropy(t *testing.T, oss bool) { - var tests = []struct { + tests := []struct { inConfig string outErr error outEntropy configutil.Entropy @@ -627,7 +627,8 @@ func testConfig_Sanitized(t *testing.T) { "cluster_addr": "top_level_cluster_addr", "disable_clustering": true, "redirect_addr": "top_level_api_addr", - "type": "consul"}, + "type": "consul", + }, "listeners": []interface{}{ map[string]interface{}{ "config": map[string]interface{}{ @@ -771,7 +772,7 @@ func testParseSeals(t *testing.T) { }, }, Seals: []*configutil.KMS{ - &configutil.KMS{ + { Type: "pkcs11", Purpose: []string{"many", "purposes"}, Config: map[string]string{ @@ -786,7 +787,7 @@ func testParseSeals(t *testing.T) { "generate_key": "true", }, }, - &configutil.KMS{ + { Type: "pkcs11", Purpose: []string{"single"}, Disabled: true, diff --git a/command/server/config_util.go b/command/server/config_util.go index a1370f6ab6..e24073ec9b 100644 --- a/command/server/config_util.go +++ b/command/server/config_util.go @@ -6,8 +6,7 @@ import ( "github.com/hashicorp/hcl/hcl/ast" ) -type entConfig struct { -} +type entConfig struct{} func (ec *entConfig) parseConfig(list *ast.ObjectList) error { return nil diff --git a/command/server/listener.go b/command/server/listener.go index f376ba9b20..eca313d72d 100644 --- a/command/server/listener.go +++ b/command/server/listener.go @@ -1,14 +1,15 @@ package server import ( - "github.com/hashicorp/errwrap" - // We must import sha512 so that it registers with the runtime so that - // certificates that use it can be parsed. _ "crypto/sha512" "fmt" "io" "net" + "github.com/hashicorp/errwrap" + // We must import sha512 so that it registers with the runtime so that + // certificates that use it can be parsed. 
+ "github.com/hashicorp/vault/helper/proxyutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/reloadutil" diff --git a/command/server/listener_tcp_test.go b/command/server/listener_tcp_test.go index 12e0c7dcb7..791bdaffcb 100644 --- a/command/server/listener_tcp_test.go +++ b/command/server/listener_tcp_test.go @@ -75,7 +75,6 @@ func TestTCPListener_tls(t *testing.T) { conf.Certificates = []tls.Certificate{clientCert} } conn, err := tls.Dial("tcp", ln.Addr().String(), conf) - if err != nil { return nil, err } @@ -158,7 +157,6 @@ func TestTCPListener_tls13(t *testing.T) { conf.Certificates = []tls.Certificate{clientCert} } conn, err := tls.Dial("tcp", ln.Addr().String(), conf) - if err != nil { return nil, err } diff --git a/command/server/server_seal_transit_acc_test.go b/command/server/server_seal_transit_acc_test.go index e6c56c725a..e7d4d00e5d 100644 --- a/command/server/server_seal_transit_acc_test.go +++ b/command/server/server_seal_transit_acc_test.go @@ -133,8 +133,10 @@ func prepareTestContainer(t *testing.T) (func(), *DockerVaultConfig) { runner, err := docker.NewServiceRunner(docker.RunOptions{ ImageRepo: "vault", ImageTag: "latest", - Cmd: []string{"server", "-log-level=trace", "-dev", fmt.Sprintf("-dev-root-token-id=%s", rootToken), - "-dev-listen-address=0.0.0.0:8200"}, + Cmd: []string{ + "server", "-log-level=trace", "-dev", fmt.Sprintf("-dev-root-token-id=%s", rootToken), + "-dev-listen-address=0.0.0.0:8200", + }, Ports: []string{"8200/tcp"}, }) if err != nil { diff --git a/command/server_profile.go b/command/server_profile.go index c4304750d1..672d2847de 100644 --- a/command/server_profile.go +++ b/command/server_profile.go @@ -16,7 +16,7 @@ func init() { func (c *ServerCommand) startMemProfiler() { profileDir := filepath.Join(os.TempDir(), "vaultprof") - if err := os.MkdirAll(profileDir, 0700); err != nil { + if err := os.MkdirAll(profileDir, 0o700); err != nil { c.logger.Debug("could not create profile directory", "error", err) return } diff --git a/command/server_test.go b/command/server_test.go index ce35f9441f..8d66831051 100644 --- a/command/server_test.go +++ b/command/server_test.go @@ -111,12 +111,12 @@ func TestServer_ReloadListener(t *testing.T) { // Setup initial certs inBytes, _ := ioutil.ReadFile(wd + "reload_foo.pem") - ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777) + ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0o777) inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key") - ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777) + ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0o777) relhcl := strings.Replace(reloadHCL, "TMPDIR", td, -1) - ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777) + ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0o777) inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem") certPool := x509.NewCertPool() @@ -168,10 +168,10 @@ func TestServer_ReloadListener(t *testing.T) { relhcl = strings.Replace(reloadHCL, "TMPDIR", td, -1) inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem") - ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777) + ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0o777) inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key") - ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777) - ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777) + ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0o777) + ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0o777) cmd.SighupCh <- struct{}{} select { diff --git a/command/server_util.go b/command/server_util.go index 
dd95e72a94..23e35fb613 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -5,9 +5,7 @@ import ( "github.com/hashicorp/vault/vault" ) -var ( - adjustCoreConfigForEnt = adjustCoreConfigForEntNoop -) +var adjustCoreConfigForEnt = adjustCoreConfigForEntNoop func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) { } diff --git a/command/ssh.go b/command/ssh.go index a5c1daf6f7..7e01882630 100644 --- a/command/ssh.go +++ b/command/ssh.go @@ -20,8 +20,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*SSHCommand)(nil) -var _ cli.CommandAutocomplete = (*SSHCommand)(nil) +var ( + _ cli.Command = (*SSHCommand)(nil) + _ cli.CommandAutocomplete = (*SSHCommand)(nil) +) type SSHCommand struct { *BaseCommand @@ -371,7 +373,7 @@ func (c *SSHCommand) handleTypeCA(username, ip, port string, sshArgs []string) i sshClient := c.client.SSHWithMountPoint(c.flagMountPoint) - var principals = username + principals := username if c.flagValidPrincipals != "" { principals = c.flagValidPrincipals } @@ -446,7 +448,7 @@ func (c *SSHCommand) handleTypeCA(username, ip, port string, sshArgs []string) i // Write the known_hosts file name := fmt.Sprintf("vault_ssh_ca_known_hosts_%s_%s", username, ip) data := fmt.Sprintf("@cert-authority %s %s", c.flagHostKeyHostnames, publicKey) - knownHosts, err, closer := c.writeTemporaryFile(name, []byte(data), 0644) + knownHosts, err, closer := c.writeTemporaryFile(name, []byte(data), 0o644) defer closer() if err != nil { c.UI.Error(fmt.Sprintf("failed to write host public key: %s", err)) @@ -737,7 +739,7 @@ func (c *SSHCommand) writeTemporaryFile(name string, data []byte, perms os.FileM // writeTemporaryKey writes the key to a temporary file and returns the path. // The caller should defer the closer to cleanup the key. 
func (c *SSHCommand) writeTemporaryKey(name string, data []byte) (string, error, func() error) { - return c.writeTemporaryFile(name, data, 0600) + return c.writeTemporaryFile(name, data, 0o600) } // If user did not provide the role with which SSH connection has diff --git a/command/ssh_test.go b/command/ssh_test.go index 2b2250a009..344e3de0d2 100644 --- a/command/ssh_test.go +++ b/command/ssh_test.go @@ -26,7 +26,7 @@ func TestParseSSHCommand(t *testing.T) { t.Parallel() _, cmd := testSSHCommand(t) - var tests = []struct { + tests := []struct { name string args []string hostname string @@ -162,7 +162,6 @@ func TestParseSSHCommand(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - hostname, username, port, err := cmd.parseSSHCommand(test.args) if err != test.err { t.Errorf("got error: %q want %q", err, test.err) @@ -184,7 +183,7 @@ func TestIsSingleSSHArg(t *testing.T) { t.Parallel() _, cmd := testSSHCommand(t) - var tests = []struct { + tests := []struct { name string arg string want bool diff --git a/command/status.go b/command/status.go index 5d3c8eec51..770adfcf3d 100644 --- a/command/status.go +++ b/command/status.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*StatusCommand)(nil) -var _ cli.CommandAutocomplete = (*StatusCommand)(nil) +var ( + _ cli.Command = (*StatusCommand)(nil) + _ cli.CommandAutocomplete = (*StatusCommand)(nil) +) type StatusCommand struct { *BaseCommand diff --git a/command/token/helper_internal.go b/command/token/helper_internal.go index 6c65b9b682..c5f35721ee 100644 --- a/command/token/helper_internal.go +++ b/command/token/helper_internal.go @@ -65,7 +65,7 @@ func (i *InternalTokenHelper) Get() (string, error) { func (i *InternalTokenHelper) Store(input string) error { i.populateTokenPath() tmpFile := i.tokenPath + ".tmp" - f, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) + f, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600) if err != nil { return err } diff --git a/command/token/helper_internal_test.go b/command/token/helper_internal_test.go index c0f87662f1..18f3abae56 100644 --- a/command/token/helper_internal_test.go +++ b/command/token/helper_internal_test.go @@ -42,7 +42,7 @@ func TestInternalHelperFilePerms(t *testing.T) { t.Fatal(err) } - if fi.Mode().Perm()&004 != 004 { + if fi.Mode().Perm()&0o04 != 0o04 { t.Fatalf("expected world-readable/writable permission bits, got: %o", fi.Mode().Perm()) } @@ -56,7 +56,7 @@ func TestInternalHelperFilePerms(t *testing.T) { t.Fatal(err) } - if fi.Mode().Perm()&004 != 0 { + if fi.Mode().Perm()&0o04 != 0 { t.Fatalf("expected no world-readable/writable permission bits, got: %o", fi.Mode().Perm()) } } diff --git a/command/token_capabilities.go b/command/token_capabilities.go index d056ace532..2aa01961d4 100644 --- a/command/token_capabilities.go +++ b/command/token_capabilities.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*TokenCapabilitiesCommand)(nil) -var _ cli.CommandAutocomplete = (*TokenCapabilitiesCommand)(nil) +var ( + _ cli.Command = (*TokenCapabilitiesCommand)(nil) + _ cli.CommandAutocomplete = (*TokenCapabilitiesCommand)(nil) +) type TokenCapabilitiesCommand struct { *BaseCommand diff --git a/command/token_create.go b/command/token_create.go index 5c0c5353c6..a8dc2f03ea 100644 --- a/command/token_create.go +++ b/command/token_create.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*TokenCreateCommand)(nil) -var _ 
cli.CommandAutocomplete = (*TokenCreateCommand)(nil) +var ( + _ cli.Command = (*TokenCreateCommand)(nil) + _ cli.CommandAutocomplete = (*TokenCreateCommand)(nil) +) type TokenCreateCommand struct { *BaseCommand diff --git a/command/token_lookup.go b/command/token_lookup.go index 83b65f574e..55284a29d1 100644 --- a/command/token_lookup.go +++ b/command/token_lookup.go @@ -9,8 +9,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*TokenLookupCommand)(nil) -var _ cli.CommandAutocomplete = (*TokenLookupCommand)(nil) +var ( + _ cli.Command = (*TokenLookupCommand)(nil) + _ cli.CommandAutocomplete = (*TokenLookupCommand)(nil) +) type TokenLookupCommand struct { *BaseCommand diff --git a/command/token_renew.go b/command/token_renew.go index d535cfd1d8..d5a07df5e1 100644 --- a/command/token_renew.go +++ b/command/token_renew.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*TokenRenewCommand)(nil) -var _ cli.CommandAutocomplete = (*TokenRenewCommand)(nil) +var ( + _ cli.Command = (*TokenRenewCommand)(nil) + _ cli.CommandAutocomplete = (*TokenRenewCommand)(nil) +) type TokenRenewCommand struct { *BaseCommand diff --git a/command/token_revoke.go b/command/token_revoke.go index 351ce639c3..f6eb72101b 100644 --- a/command/token_revoke.go +++ b/command/token_revoke.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*TokenRevokeCommand)(nil) -var _ cli.CommandAutocomplete = (*TokenRevokeCommand)(nil) +var ( + _ cli.Command = (*TokenRevokeCommand)(nil) + _ cli.CommandAutocomplete = (*TokenRevokeCommand)(nil) +) type TokenRevokeCommand struct { *BaseCommand diff --git a/command/unwrap.go b/command/unwrap.go index 62184e6b71..53ff0787de 100644 --- a/command/unwrap.go +++ b/command/unwrap.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*UnwrapCommand)(nil) -var _ cli.CommandAutocomplete = (*UnwrapCommand)(nil) +var ( + _ cli.Command = (*UnwrapCommand)(nil) + _ cli.CommandAutocomplete = (*UnwrapCommand)(nil) +) // UnwrapCommand is a Command that behaves like ReadCommand but specifically for // unwrapping cubbyhole-wrapped secrets diff --git a/command/version.go b/command/version.go index 15c1f048f6..ad366601b7 100644 --- a/command/version.go +++ b/command/version.go @@ -8,8 +8,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*VersionCommand)(nil) -var _ cli.CommandAutocomplete = (*VersionCommand)(nil) +var ( + _ cli.Command = (*VersionCommand)(nil) + _ cli.CommandAutocomplete = (*VersionCommand)(nil) +) // VersionCommand is a Command implementation prints the version. type VersionCommand struct { diff --git a/command/write.go b/command/write.go index 793c9ba53f..dd0d7cfde9 100644 --- a/command/write.go +++ b/command/write.go @@ -10,8 +10,10 @@ import ( "github.com/posener/complete" ) -var _ cli.Command = (*WriteCommand)(nil) -var _ cli.CommandAutocomplete = (*WriteCommand)(nil) +var ( + _ cli.Command = (*WriteCommand)(nil) + _ cli.CommandAutocomplete = (*WriteCommand)(nil) +) // WriteCommand is a Command that puts data into the Vault. 
type WriteCommand struct { diff --git a/go.mod b/go.mod index c12623d9e8..1eb7497529 100644 --- a/go.mod +++ b/go.mod @@ -161,7 +161,7 @@ require ( golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c golang.org/x/text v0.3.5 // indirect - golang.org/x/tools v0.0.0-20200521155704-91d71f6c2f04 + golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c google.golang.org/api v0.29.0 google.golang.org/grpc v1.29.1 google.golang.org/protobuf v1.25.0 @@ -169,4 +169,5 @@ require ( gopkg.in/ory-am/dockertest.v3 v3.3.4 gopkg.in/square/go-jose.v2 v2.5.1 layeh.com/radius v0.0.0-20190322222518-890bc1058917 + mvdan.cc/gofumpt v0.1.1 ) diff --git a/go.sum b/go.sum index f55f7949d4..61af543533 100644 --- a/go.sum +++ b/go.sum @@ -1084,6 +1084,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1206,6 +1207,7 @@ github.com/yandex-cloud/go-sdk v0.0.0-20200722140627-2194e5077f13/go.mod h1:LEdA github.com/yhat/scrape v0.0.0-20161128144610-24b7890b0945/go.mod h1:4vRFPPNYllgCacoj+0FoKOjTW68rUhEfqPLiEJaK2w8= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da h1:NimzV1aGyq29m5ukMK0AMWEhFaL/lrEOaephfuoiARg= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1311,6 +1313,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1352,6 +1357,7 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c h1:dk0ukUIHmGHqASjP0iue2261isepFCC6XRCSd1nHgDw= 
golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c/go.mod h1:iQL9McJNjoIa5mjH6nYTCTZXUN6RP+XW3eib7Ya3XcI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1373,6 +1379,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1513,10 +1521,14 @@ golang.org/x/tools v0.0.0-20200409170454-77362c5149f0/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200416214402-fc959738d646/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200521155704-91d71f6c2f04 h1:LbW6ziLoA0vw8ZR4bmjKAzgEpunUBaEX1ia1Q1jEGC4= golang.org/x/tools v0.0.0-20200521155704-91d71f6c2f04/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c h1:dS09fXwOFF9cXBnIzZexIuUBj95U1NyQjkEhkgidDow= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= @@ -1689,6 +1701,8 @@ k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTU k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= +mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= 
+mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/helper/dhutil/dhutil.go b/helper/dhutil/dhutil.go index 848b7b85c8..a86314c200 100644 --- a/helper/dhutil/dhutil.go +++ b/helper/dhutil/dhutil.go @@ -9,9 +9,10 @@ import ( "encoding/hex" "errors" "fmt" - "golang.org/x/crypto/hkdf" "io" + "golang.org/x/crypto/hkdf" + "golang.org/x/crypto/curve25519" ) diff --git a/helper/forwarding/util.go b/helper/forwarding/util.go index 90334052f0..de92639afb 100644 --- a/helper/forwarding/util.go +++ b/helper/forwarding/util.go @@ -192,7 +192,7 @@ func NewRPCResponseWriter() *RPCResponseWriter { body: new(bytes.Buffer), statusCode: 200, } - //w.header.Set("Content-Type", "application/octet-stream") + // w.header.Set("Content-Type", "application/octet-stream") return w } diff --git a/helper/metricsutil/bucket.go b/helper/metricsutil/bucket.go index 088516bd6e..9cbb2cdc2d 100644 --- a/helper/metricsutil/bucket.go +++ b/helper/metricsutil/bucket.go @@ -35,7 +35,6 @@ func TTLBucket(ttl time.Duration) string { } else { return bucketBoundaries[upperBound].Label } - } func ExpiryBucket(expiryTime time.Time, leaseEpsilon time.Duration, rollingWindow time.Time, labelNS string, useNS bool) *LeaseExpiryLabel { diff --git a/helper/metricsutil/gauge_process.go b/helper/metricsutil/gauge_process.go index 0cd1dd9199..ce43d29d82 100644 --- a/helper/metricsutil/gauge_process.go +++ b/helper/metricsutil/gauge_process.go @@ -15,8 +15,7 @@ type clock interface { NewTicker(time.Duration) *time.Ticker } -type defaultClock struct { -} +type defaultClock struct{} func (_ defaultClock) Now() time.Time { return time.Now() @@ -219,7 +218,6 @@ func (p *GaugeCollectionProcess) streamGaugesToSink(values []GaugeLabelValues) { case <-sendTick.C: break } - } p.sink.SetGaugeWithLabels(p.key, lv.Value, lv.Labels) } diff --git a/helper/metricsutil/gauge_process_test.go b/helper/metricsutil/gauge_process_test.go index 7a12e90c90..39b3a0e78d 100644 --- a/helper/metricsutil/gauge_process_test.go +++ b/helper/metricsutil/gauge_process_test.go @@ -405,7 +405,6 @@ func TestGauge_InterruptedStreaming(t *testing.T) { t.Errorf("Found %v gauges, expected fewer.", len(intervals[0].Gauges)) } - } // helper function to create a closure that's a GaugeCollector. 
diff --git a/helper/metricsutil/metricsutil.go b/helper/metricsutil/metricsutil.go index 31adf69551..0abb8148e3 100644 --- a/helper/metricsutil/metricsutil.go +++ b/helper/metricsutil/metricsutil.go @@ -81,12 +81,13 @@ func (m *MetricsHelper) AddGaugeLoopMetric(key []string, val float32, labels []L GaugeMetric{ Key: key, Value: val, - Labels: labels}) + Labels: labels, + }) } func (m *MetricsHelper) CreateMetricsCacheKeyName(key []string, val float32, labels []Label) string { var keyJoin string = strings.Join(key, ".") - var labelJoinStr = "" + labelJoinStr := "" for _, label := range labels { labelJoinStr = labelJoinStr + label.Name + "|" + label.Value + "||" } diff --git a/helper/metricsutil/wrapped_metrics.go b/helper/metricsutil/wrapped_metrics.go index ecef295b1a..4f866939c6 100644 --- a/helper/metricsutil/wrapped_metrics.go +++ b/helper/metricsutil/wrapped_metrics.go @@ -128,7 +128,9 @@ func NamespaceLabel(ns *namespace.Namespace) metrics.Label { case ns.ID == namespace.RootNamespaceID: return metrics.Label{"namespace", "root"} default: - return metrics.Label{"namespace", - strings.Trim(ns.Path, "/")} + return metrics.Label{ + "namespace", + strings.Trim(ns.Path, "/"), + } } } diff --git a/helper/metricsutil/wrapped_metrics_test.go b/helper/metricsutil/wrapped_metrics_test.go index 8ee82af587..c0fb2c3866 100644 --- a/helper/metricsutil/wrapped_metrics_test.go +++ b/helper/metricsutil/wrapped_metrics_test.go @@ -107,5 +107,4 @@ func TestClusterLabelPresent(t *testing.T) { if !isLabelPresent(clusterLabel, s.Labels) { t.Error("Sample label", s.Labels, "does not include", clusterLabel) } - } diff --git a/helper/mfa/duo/path_duo_access.go b/helper/mfa/duo/path_duo_access.go index 9dfc4a1b29..e532137979 100644 --- a/helper/mfa/duo/path_duo_access.go +++ b/helper/mfa/duo/path_duo_access.go @@ -20,15 +20,15 @@ func pathDuoAccess() *framework.Path { return &framework.Path{ Pattern: `duo/access`, Fields: map[string]*framework.FieldSchema{ - "skey": &framework.FieldSchema{ + "skey": { Type: framework.TypeString, Description: "Duo secret key", }, - "ikey": &framework.FieldSchema{ + "ikey": { Type: framework.TypeString, Description: "Duo integration key", }, - "host": &framework.FieldSchema{ + "host": { Type: framework.TypeString, Description: "Duo api host", }, diff --git a/helper/mfa/duo/path_duo_config.go b/helper/mfa/duo/path_duo_config.go index 8692d60acb..d2299d3d64 100644 --- a/helper/mfa/duo/path_duo_config.go +++ b/helper/mfa/duo/path_duo_config.go @@ -13,15 +13,15 @@ func pathDuoConfig() *framework.Path { return &framework.Path{ Pattern: `duo/config`, Fields: map[string]*framework.FieldSchema{ - "user_agent": &framework.FieldSchema{ + "user_agent": { Type: framework.TypeString, Description: "User agent to connect to Duo (default \"\")", }, - "username_format": &framework.FieldSchema{ + "username_format": { Type: framework.TypeString, Description: "Format string given auth method username as argument to create Duo username (default '%s')", }, - "push_info": &framework.FieldSchema{ + "push_info": { Type: framework.TypeString, Description: "A string of URL-encoded key/value pairs that provides additional context about the authentication attempt in the Duo Mobile app", }, diff --git a/helper/mfa/mfa_test.go b/helper/mfa/mfa_test.go index 85a7d1291d..6e338ae191 100644 --- a/helper/mfa/mfa_test.go +++ b/helper/mfa/mfa_test.go @@ -33,7 +33,7 @@ func testPathLogin() *framework.Path { return &framework.Path{ Pattern: `login`, Fields: map[string]*framework.FieldSchema{ - "username": 
&framework.FieldSchema{ + "username": { Type: framework.TypeString, }, }, diff --git a/helper/mfa/path_mfa_config.go b/helper/mfa/path_mfa_config.go index a996c93829..84b3ea133f 100644 --- a/helper/mfa/path_mfa_config.go +++ b/helper/mfa/path_mfa_config.go @@ -11,7 +11,7 @@ func pathMFAConfig(b *backend) *framework.Path { return &framework.Path{ Pattern: `mfa_config`, Fields: map[string]*framework.FieldSchema{ - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Description: "Enables MFA with given backend (available: duo)", }, diff --git a/helper/pgpkeys/flag.go b/helper/pgpkeys/flag.go index 4d5fd96922..2a85f0d580 100644 --- a/helper/pgpkeys/flag.go +++ b/helper/pgpkeys/flag.go @@ -139,5 +139,4 @@ func ReadPGPFile(path string) (string, error) { return buf.String(), nil } return base64.StdEncoding.EncodeToString(buf.Bytes()), nil - } diff --git a/helper/pgpkeys/flag_test.go b/helper/pgpkeys/flag_test.go index 6fa6718b26..de08899259 100644 --- a/helper/pgpkeys/flag_test.go +++ b/helper/pgpkeys/flag_test.go @@ -36,7 +36,7 @@ func TestPubKeyFilesFlagSetBinary(t *testing.T) { if err != nil { t.Fatalf("Error decoding bytes for public key 1: %s", err) } - err = ioutil.WriteFile(tempDir+"/pubkey1", pub1Bytes, 0755) + err = ioutil.WriteFile(tempDir+"/pubkey1", pub1Bytes, 0o755) if err != nil { t.Fatalf("Error writing pub key 1 to temp file: %s", err) } @@ -44,7 +44,7 @@ func TestPubKeyFilesFlagSetBinary(t *testing.T) { if err != nil { t.Fatalf("Error decoding bytes for public key 2: %s", err) } - err = ioutil.WriteFile(tempDir+"/pubkey2", pub2Bytes, 0755) + err = ioutil.WriteFile(tempDir+"/pubkey2", pub2Bytes, 0o755) if err != nil { t.Fatalf("Error writing pub key 2 to temp file: %s", err) } @@ -52,7 +52,7 @@ func TestPubKeyFilesFlagSetBinary(t *testing.T) { if err != nil { t.Fatalf("Error decoding bytes for public key 3: %s", err) } - err = ioutil.WriteFile(tempDir+"/pubkey3", pub3Bytes, 0755) + err = ioutil.WriteFile(tempDir+"/pubkey3", pub3Bytes, 0o755) if err != nil { t.Fatalf("Error writing pub key 3 to temp file: %s", err) } @@ -81,15 +81,15 @@ func TestPubKeyFilesFlagSetB64(t *testing.T) { } defer os.RemoveAll(tempDir) - err = ioutil.WriteFile(tempDir+"/pubkey1", []byte(pubKey1), 0755) + err = ioutil.WriteFile(tempDir+"/pubkey1", []byte(pubKey1), 0o755) if err != nil { t.Fatalf("Error writing pub key 1 to temp file: %s", err) } - err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0755) + err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0o755) if err != nil { t.Fatalf("Error writing pub key 2 to temp file: %s", err) } - err = ioutil.WriteFile(tempDir+"/pubkey3", []byte(pubKey3), 0755) + err = ioutil.WriteFile(tempDir+"/pubkey3", []byte(pubKey3), 0o755) if err != nil { t.Fatalf("Error writing pub key 3 to temp file: %s", err) } @@ -118,7 +118,7 @@ func TestPubKeyFilesFlagSetKeybase(t *testing.T) { } defer os.RemoveAll(tempDir) - err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0755) + err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0o755) if err != nil { t.Fatalf("Error writing pub key 2 to temp file: %s", err) } @@ -183,6 +183,7 @@ PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O 9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx 8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=` + const pubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG 
Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e @@ -209,6 +210,7 @@ PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4 MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=` + const pubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj 6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH diff --git a/helper/policies/policies.go b/helper/policies/policies.go index 1e25522f43..729ce10b2f 100644 --- a/helper/policies/policies.go +++ b/helper/policies/policies.go @@ -33,10 +33,10 @@ func EquivalentPolicies(a, b []string) bool { // Now we'll build our checking slices var sortedA, sortedB []string - for keyA, _ := range mapA { + for keyA := range mapA { sortedA = append(sortedA, keyA) } - for keyB, _ := range mapB { + for keyB := range mapB { sortedB = append(sortedB, keyB) } sort.Strings(sortedA) diff --git a/helper/random/serializing.go b/helper/random/serializing.go index c99d631aad..93371df028 100644 --- a/helper/random/serializing.go +++ b/helper/random/serializing.go @@ -40,7 +40,7 @@ func (r serializableRules) MarshalJSON() (b []byte, err error) { } ruleMap := map[string][]map[string]interface{}{ - rule.Type(): []map[string]interface{}{ + rule.Type(): { ruleData, }, } diff --git a/helper/testhelpers/azurite/azurite.go b/helper/testhelpers/azurite/azurite.go index 37fe143911..13d65750d4 100644 --- a/helper/testhelpers/azurite/azurite.go +++ b/helper/testhelpers/azurite/azurite.go @@ -57,8 +57,10 @@ func (c Config) ContainerURL(ctx context.Context, container string) (*azblob.Con var _ docker.ServiceConfig = &Config{} -const accountName = "testaccount" -const accountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" +const ( + accountName = "testaccount" + accountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" +) func PrepareTestContainer(t *testing.T, version string) (func(), docker.ServiceConfig) { if version == "" { diff --git a/helper/testhelpers/docker/testhelpers.go b/helper/testhelpers/docker/testhelpers.go index 270dc2f573..673a0dea7e 100644 --- a/helper/testhelpers/docker/testhelpers.go +++ b/helper/testhelpers/docker/testhelpers.go @@ -213,7 +213,7 @@ func (d *Runner) Start(ctx context.Context) (*types.ContainerJSON, []string, err netConfig := &network.NetworkingConfig{} if d.RunOptions.NetworkID != "" { netConfig.EndpointsConfig = map[string]*network.EndpointSettings{ - d.RunOptions.NetworkID: &network.EndpointSettings{}, + d.RunOptions.NetworkID: {}, } } diff --git a/helper/testhelpers/ldap/ldaphelper.go b/helper/testhelpers/ldap/ldaphelper.go index f8b468c3e9..c6c44bd90e 100644 --- a/helper/testhelpers/ldap/ldaphelper.go +++ b/helper/testhelpers/ldap/ldaphelper.go @@ -18,7 +18,7 @@ func PrepareTestContainer(t *testing.T, version string) (cleanup func(), cfg *ld ImageTag: version, ContainerName: "ldap", Ports: []string{"389/tcp"}, - //Env: []string{"LDAP_DEBUG_LEVEL=384"}, + // Env: []string{"LDAP_DEBUG_LEVEL=384"}, }) if err != nil { t.Fatalf("could not start local LDAP docker container: %s", err) @@ -54,7 +54,6 @@ func PrepareTestContainer(t *testing.T, version string) (cleanup 
func(), cfg *ld return docker.NewServiceURLParse(connURL) }) - if err != nil { t.Fatalf("could not start local LDAP docker container: %s", err) } diff --git a/helper/testhelpers/minio/miniohelper.go b/helper/testhelpers/minio/miniohelper.go index 168add797e..c537585059 100644 --- a/helper/testhelpers/minio/miniohelper.go +++ b/helper/testhelpers/minio/miniohelper.go @@ -21,8 +21,10 @@ type Config struct { Region string } -const accessKeyID = "min-access-key" -const secretKey = "min-secret-key" +const ( + accessKeyID = "min-access-key" + secretKey = "min-secret-key" +) func PrepareTestContainer(t *testing.T, version string) (func(), *Config) { if version == "" { diff --git a/helper/testhelpers/mongodb/mongodbhelper.go b/helper/testhelpers/mongodb/mongodbhelper.go index f9b1657e71..3dff8484b3 100644 --- a/helper/testhelpers/mongodb/mongodbhelper.go +++ b/helper/testhelpers/mongodb/mongodbhelper.go @@ -64,7 +64,6 @@ func PrepareTestContainerWithDatabase(t *testing.T, version, dbName string) (fun return docker.NewServiceURLParse(connURL) }) - if err != nil { t.Fatalf("could not start docker mongo: %s", err) } diff --git a/helper/testhelpers/mysql/mysqlhelper.go b/helper/testhelpers/mysql/mysqlhelper.go index 147900a320..145c91e253 100644 --- a/helper/testhelpers/mysql/mysqlhelper.go +++ b/helper/testhelpers/mysql/mysqlhelper.go @@ -52,7 +52,6 @@ func PrepareTestContainer(t *testing.T, legacy bool, pw string) (func(), string) } return &Config{ServiceHostPort: *hostIP, ConnString: connString}, nil }) - if err != nil { t.Fatalf("could not start docker mysql: %s", err) } diff --git a/helper/testhelpers/postgresql/postgresqlhelper.go b/helper/testhelpers/postgresql/postgresqlhelper.go index 0ee5b81412..d15249a850 100644 --- a/helper/testhelpers/postgresql/postgresqlhelper.go +++ b/helper/testhelpers/postgresql/postgresqlhelper.go @@ -4,10 +4,11 @@ import ( "context" "database/sql" "fmt" - "github.com/hashicorp/vault/helper/testhelpers/docker" "net/url" "os" "testing" + + "github.com/hashicorp/vault/helper/testhelpers/docker" ) func PrepareTestContainer(t *testing.T, version string) (func(), string) { diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 065512f8e1..392f9408fc 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -370,7 +370,7 @@ func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]by } var statusResp *api.RekeyUpdateResponse - var keys = cluster.BarrierKeys + keys := cluster.BarrierKeys if cluster.Cores[0].Core.SealAccess().RecoveryKeySupported() { keys = cluster.RecoveryKeys } @@ -436,7 +436,6 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib } func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { - addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) @@ -452,7 +451,7 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { } leaderInfos := []*raft.LeaderJoinInfo{ - &raft.LeaderJoinInfo{ + { LeaderAPIAddr: leader.Client.Address(), TLSConfig: leader.TLSConfig, }, @@ -494,7 +493,6 @@ func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftli // NewHardcodedServerAddressProvider is a convenience function that makes a // ServerAddressProvider from a given cluster address base port. 
func NewHardcodedServerAddressProvider(numCores, baseClusterPort int) raftlib.ServerAddressProvider { - entries := make(map[raftlib.ServerID]raftlib.ServerAddress) for i := 0; i < numCores; i++ { @@ -512,7 +510,6 @@ func NewHardcodedServerAddressProvider(numCores, baseClusterPort int) raftlib.Se // the correct number of servers, having the correct NodeIDs, and exactly one // leader. func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error { - backend := core.UnderlyingRawStorage.(*raft.RaftBackend) ctx := namespace.RootContext(context.Background()) config, err := backend.GetConfiguration(ctx) @@ -563,7 +560,6 @@ func WaitForRaftApply(t testing.T, core *vault.TestClusterCore, index uint64) { // AwaitLeader waits for one of the cluster's nodes to become leader. func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) { - timeout := time.Now().Add(30 * time.Second) for { if time.Now().After(timeout) { diff --git a/helper/testhelpers/teststorage/teststorage.go b/helper/testhelpers/teststorage/teststorage.go index a00bac000a..915d435dab 100644 --- a/helper/testhelpers/teststorage/teststorage.go +++ b/helper/testhelpers/teststorage/teststorage.go @@ -85,7 +85,7 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma if err != nil { t.Fatal(err) } - //t.Logf("raft dir: %s", raftDir) + // t.Logf("raft dir: %s", raftDir) cleanupFunc := func() { os.RemoveAll(raftDir) } @@ -179,9 +179,11 @@ type ClusterSetupMutator func(conf *vault.CoreConfig, opts *vault.TestClusterOpt func InmemBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.PhysicalFactory = SharedPhysicalFactory(MakeInmemBackend) } + func InmemNonTransactionalBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.PhysicalFactory = SharedPhysicalFactory(MakeInmemNonTransactionalBackend) } + func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.PhysicalFactory = SharedPhysicalFactory(MakeFileBackend) } diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index 660f9542df..69f3ec310d 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -39,7 +39,6 @@ type StorageCleanup func() // MakeReusableStorage makes a physical backend that can be re-used across // multiple test clusters in sequence. func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) { - storage := ReusableStorage{ IsRaft: false, @@ -74,7 +73,6 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica // MakeReusableRaftStorage makes a physical raft backend that can be re-used // across multiple test clusters in sequence. 
func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, addressProvider raftlib.ServerAddressProvider) (ReusableStorage, StorageCleanup) { - raftDirs := make([]string, numCores) for i := 0; i < numCores; i++ { raftDirs[i] = makeRaftDir(t) @@ -164,12 +162,11 @@ func makeRaftDir(t testing.T) string { if err != nil { t.Fatal(err) } - //t.Logf("raft dir: %s", raftDir) + // t.Logf("raft dir: %s", raftDir) return raftDir } func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, addressProvider raftlib.ServerAddressProvider, ha bool) *vault.PhysicalBackendBundle { - nodeID := fmt.Sprintf("core-%d", coreIdx) conf := map[string]string{ "path": raftDir, diff --git a/helper/xor/xor.go b/helper/xor/xor.go index 0d9567eb56..6e03c23621 100644 --- a/helper/xor/xor.go +++ b/helper/xor/xor.go @@ -17,7 +17,7 @@ func XORBytes(a, b []byte) ([]byte, error) { buf := make([]byte, len(a)) - for i, _ := range a { + for i := range a { buf[i] = a[i] ^ b[i] } diff --git a/http/auth_token_test.go b/http/auth_token_test.go index 5faba6112e..552a32cbdd 100644 --- a/http/auth_token_test.go +++ b/http/auth_token_test.go @@ -114,7 +114,6 @@ func TestAuthTokenLookup(t *testing.T) { if secret.Data["id"] != secret2.Auth.ClientToken { t.Errorf("Did not get back details about our provided token, id returned=%s", secret.Data["id"]) } - } func TestAuthTokenLookupSelf(t *testing.T) { @@ -143,7 +142,6 @@ func TestAuthTokenLookupSelf(t *testing.T) { if secret.Data["display_name"] != "root" { t.Errorf("Did not get back details about our own (self) token, display_name returned=%s", secret.Data["display_name"]) } - } func TestAuthTokenRenew(t *testing.T) { diff --git a/http/forwarding_bench_test.go b/http/forwarding_bench_test.go index 14d72081e6..0743181568 100644 --- a/http/forwarding_bench_test.go +++ b/http/forwarding_bench_test.go @@ -84,7 +84,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { default: b.Fatalf("bad status code: %d, resp: %s", w.StatusCode(), w.Body().String()) } - //b.Log(w.Body().String()) + // b.Log(w.Body().String()) numOps++ } diff --git a/http/forwarding_test.go b/http/forwarding_test.go index 0e1a85758b..f0225a4223 100644 --- a/http/forwarding_test.go +++ b/http/forwarding_test.go @@ -174,7 +174,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) }, } - //core.Logger().Printf("[TRACE] mounting transit") + // core.Logger().Printf("[TRACE] mounting transit") req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port), bytes.NewBuffer([]byte("{\"type\": \"transit\"}"))) if err != nil { @@ -185,7 +185,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) if err != nil { t.Fatal(err) } - //core.Logger().Printf("[TRACE] done mounting transit") + // core.Logger().Printf("[TRACE] done mounting transit") var totalOps *uint32 = new(uint32) var successfulOps *uint32 = new(uint32) @@ -316,7 +316,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) switch chosenFunc { // Encrypt our plaintext and store the result case "encrypt": - //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) + // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64)))) if err != nil { panic(err) @@ -343,7 +343,7 @@ func 
testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) continue } - //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) + // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct)))) if err != nil { panic(err) @@ -372,7 +372,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // Rotate to a new key version case "rotate": - //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) + // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBuffer([]byte("{}"))) if err != nil { panic(err) @@ -408,7 +408,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) setVersion := (myRand.Int31() % latestVersion) + 1 - //core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion) + // core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion) _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", bytes.NewBuffer([]byte(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion)))) if err != nil { @@ -425,7 +425,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // Spawn some of these workers for 10 seconds for i := 0; i < int(atomic.LoadUint32(numWorkers)); i++ { wg.Add(1) - //core.Logger().Printf("[TRACE] spawning %d", i) + // core.Logger().Printf("[TRACE] spawning %d", i) go doFuzzy(i+1, parallel) } diff --git a/http/handler.go b/http/handler.go index ba46fb5845..dc52598ef9 100644 --- a/http/handler.go +++ b/http/handler.go @@ -474,7 +474,6 @@ func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler { func handleUI(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - // The fileserver handler strips trailing slashes and does a redirect. // We don't want the redirect to happen so we preemptively trim the slash // here. 
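The hunks above are dominated by a few mechanical rewrites from the stricter formatter run in this patch (gofumpt) rather than behavior changes. As a reference point, here is a minimal, hypothetical Go file (not part of the patch, using a stand-in fieldSchema type rather than the real framework.FieldSchema) showing three of those rewrites side by side: the 0o prefix on octal file modes, element-type elision inside composite literals, and dropping the unused blank identifier from range clauses.

package main

// Hypothetical demo illustrating rewrites seen in the hunks above.
import (
    "fmt"
    "io/ioutil"
)

type fieldSchema struct {
    Type        string
    Description string
}

func main() {
    // was: ioutil.WriteFile(path, data, 0755)
    if err := ioutil.WriteFile("/tmp/gofumpt-demo", []byte("x"), 0o755); err != nil {
        fmt.Println("write failed:", err)
    }

    // was: "skey": &fieldSchema{...} -- the element type is now elided
    fields := map[string]*fieldSchema{
        "skey": {Type: "string", Description: "Duo secret key"},
    }

    // was: for k, _ := range fields
    for k := range fields {
        fmt.Println(k, fields[k].Description)
    }
}
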
diff --git a/http/handler_test.go b/http/handler_test.go index 54ebe21da6..8342c69df2 100644 --- a/http/handler_test.go +++ b/http/handler_test.go @@ -566,7 +566,6 @@ func TestHandler_requestAuth(t *testing.T) { rootCtx := namespace.RootContext(nil) te, err := core.LookupToken(rootCtx, token) - if err != nil { t.Fatalf("err: %s", err) } @@ -630,7 +629,6 @@ func TestHandler_requestAuth(t *testing.T) { if err == nil { t.Fatalf("expected an error, got none") } - } func TestHandler_getTokenFromReq(t *testing.T) { diff --git a/http/logical_test.go b/http/logical_test.go index fd41df8aa0..05d6bf4eac 100644 --- a/http/logical_test.go +++ b/http/logical_test.go @@ -279,6 +279,7 @@ func TestLogical_RequestSizeLimit(t *testing.T) { }) testResponseStatus(t, resp, http.StatusRequestEntityTooLarge) } + func TestLogical_RequestSizeDisableLimit(t *testing.T) { core, _, token := vault.TestCoreUnsealed(t) ln, addr := TestListener(t) @@ -497,5 +498,4 @@ func TestLogical_ShouldParseForm(t *testing.T) { t.Fatalf("%s fail: expected isForm %t, got %t", name, test.isForm, isForm) } } - } diff --git a/http/plugin_test.go b/http/plugin_test.go index 305ce2457e..e14e4df097 100644 --- a/http/plugin_test.go +++ b/http/plugin_test.go @@ -156,7 +156,6 @@ func TestPlugin_MockRawResponse(t *testing.T) { if resp.StatusCode != 200 { t.Fatal("bad status") } - } func TestPlugin_GetParams(t *testing.T) { diff --git a/http/sys_config_cors_test.go b/http/sys_config_cors_test.go index bd6c7aeae8..3ad0e810a2 100644 --- a/http/sys_config_cors_test.go +++ b/http/sys_config_cors_test.go @@ -74,5 +74,4 @@ func TestSysConfigCors(t *testing.T) { if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) } - } diff --git a/http/sys_config_state_test.go b/http/sys_config_state_test.go index 1167ebb288..53a605b427 100644 --- a/http/sys_config_state_test.go +++ b/http/sys_config_state_test.go @@ -64,5 +64,4 @@ func TestSysConfigState_Sanitized(t *testing.T) { if diff := deep.Equal(actual, expected); len(diff) > 0 { t.Fatalf("bad mismatch response body: diff: %v", diff) } - } diff --git a/http/sys_health.go b/http/sys_health.go index 49d9fe97d0..fe382b62d6 100644 --- a/http/sys_health.go +++ b/http/sys_health.go @@ -43,7 +43,6 @@ func fetchStatusCode(r *http.Request, field string) (int, bool, bool) { func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { code, body, err := getSysHealth(core, r) - if err != nil { core.Logger().Error("error checking health", "error", err) respondError(w, code, nil) diff --git a/http/sys_health_test.go b/http/sys_health_test.go index 1a7dc22c5f..8cf373d94a 100644 --- a/http/sys_health_test.go +++ b/http/sys_health_test.go @@ -2,7 +2,6 @@ package http import ( "io/ioutil" - "net/http" "net/url" "reflect" @@ -118,7 +117,6 @@ func TestSysHealth_get(t *testing.T) { if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) } - } func TestSysHealth_customcodes(t *testing.T) { diff --git a/http/sys_leader.go b/http/sys_leader.go index 77cc884f68..8c2ce21e50 100644 --- a/http/sys_leader.go +++ b/http/sys_leader.go @@ -1,8 +1,9 @@ package http import ( - "github.com/hashicorp/vault/vault" "net/http" + + "github.com/hashicorp/vault/vault" ) // This endpoint is needed to answer queries before Vault unseals diff --git a/http/sys_monitor_test.go b/http/sys_monitor_test.go index 97c701fc38..f687ab2090 100644 --- a/http/sys_monitor_test.go +++ b/http/sys_monitor_test.go @@ -53,7 +53,6 @@ func 
TestSysMonitorStreamingLogs(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) defer cancel() logCh, err := client.Sys().Monitor(ctx, "DEBUG") - if err != nil { t.Fatal(err) } diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go index a235bf862e..157be7845a 100644 --- a/http/sys_mount_test.go +++ b/http/sys_mount_test.go @@ -336,7 +336,6 @@ func TestSysMount(t *testing.T) { if diff := deep.Equal(actual, expected); len(diff) > 0 { t.Fatalf("bad, diff: %#v", diff) } - } func TestSysMount_put(t *testing.T) { diff --git a/http/sys_seal_test.go b/http/sys_seal_test.go index fbd058e195..a888039a6c 100644 --- a/http/sys_seal_test.go +++ b/http/sys_seal_test.go @@ -262,7 +262,6 @@ func TestSysUnseal_Reset(t *testing.T) { if diff := deep.Equal(actual, expected); diff != nil { t.Fatal(diff) } - } // Test Seal's permissions logic, which is slightly different than normal code diff --git a/internalshared/configutil/config_util.go b/internalshared/configutil/config_util.go index e9c7d5d549..fc527cc2af 100644 --- a/internalshared/configutil/config_util.go +++ b/internalshared/configutil/config_util.go @@ -6,8 +6,7 @@ import ( "github.com/hashicorp/hcl/hcl/ast" ) -type EntSharedConfig struct { -} +type EntSharedConfig struct{} func (ec *EntSharedConfig) ParseConfig(list *ast.ObjectList) error { return nil diff --git a/internalshared/configutil/encrypt_decrypt.go b/internalshared/configutil/encrypt_decrypt.go index f651b340fa..7f0602ef0d 100644 --- a/internalshared/configutil/encrypt_decrypt.go +++ b/internalshared/configutil/encrypt_decrypt.go @@ -87,6 +87,6 @@ func EncryptDecrypt(rawStr string, decrypt, strip bool, wrapper wrapping.Wrapper prevMaxLoc = match[1] } // At the end, append the rest - out = append(out, raw[prevMaxLoc:len(raw)]...) + out = append(out, raw[prevMaxLoc:]...) return string(out), nil } diff --git a/internalshared/configutil/encrypt_decrypt_test.go b/internalshared/configutil/encrypt_decrypt_test.go index 4d14ba23b9..bc4a7f5fb9 100644 --- a/internalshared/configutil/encrypt_decrypt_test.go +++ b/internalshared/configutil/encrypt_decrypt_test.go @@ -99,9 +99,11 @@ func (r *reversingWrapper) Encrypt(_ context.Context, input []byte, _ []byte) (* Ciphertext: r.reverse(input), }, nil } + func (r *reversingWrapper) Decrypt(_ context.Context, input *wrapping.EncryptedBlobInfo, _ []byte) ([]byte, error) { return r.reverse(input.Ciphertext), nil } + func (r *reversingWrapper) reverse(input []byte) []byte { output := make([]byte, len(input)) for i, j := 0, len(input)-1; i < j; i, j = i+1, j-1 { diff --git a/internalshared/configutil/telemetry.go b/internalshared/configutil/telemetry.go index 4f31605b73..932f11ebcd 100644 --- a/internalshared/configutil/telemetry.go +++ b/internalshared/configutil/telemetry.go @@ -370,7 +370,6 @@ func SetupTelemetry(opts *SetupTelemetryOpts) (*metrics.InmemSink, *metricsutil. 
} fanout = append(fanout, inm) globalMetrics, err := metrics.NewGlobal(metricsConf, fanout) - if err != nil { return nil, nil, false, err } diff --git a/internalshared/listenerutil/listener_test.go b/internalshared/listenerutil/listener_test.go index c6bc5afad7..3c2afa593a 100644 --- a/internalshared/listenerutil/listener_test.go +++ b/internalshared/listenerutil/listener_test.go @@ -84,5 +84,4 @@ func TestUnixSocketListener(t *testing.T) { t.Fatalf("failed to set permissions on the socket file") } }) - } diff --git a/internalshared/reloadutil/reload_test.go b/internalshared/reloadutil/reload_test.go index 872ab37168..910ee296ec 100644 --- a/internalshared/reloadutil/reload_test.go +++ b/internalshared/reloadutil/reload_test.go @@ -49,11 +49,11 @@ opM24uvQT3Bc0UM0WNh3tdRFuboxDeBDh7PX/2RIoiaMuCCiRZ3O0A== keyFile := tempDir + "/server.key" certFile := tempDir + "/server.crt" - err = ioutil.WriteFile(certFile, cert, 0755) + err = ioutil.WriteFile(certFile, cert, 0o755) if err != nil { t.Fatalf("Error writing to temp file: %s", err) } - err = ioutil.WriteFile(keyFile, key, 0755) + err = ioutil.WriteFile(keyFile, key, 0o755) if err != nil { t.Fatalf("Error writing to temp file: %s", err) } diff --git a/physical/aerospike/aerospike_test.go b/physical/aerospike/aerospike_test.go index 1f5989ac49..1845248d3a 100644 --- a/physical/aerospike/aerospike_test.go +++ b/physical/aerospike/aerospike_test.go @@ -24,7 +24,6 @@ func TestAerospikeBackend(t *testing.T) { "namespace": config.namespace, "set": config.set, }, logger) - if err != nil { t.Fatalf("err: %s", err) } diff --git a/physical/alicloudoss/alicloudoss.go b/physical/alicloudoss/alicloudoss.go index cc52e07b1c..c58ece46b9 100644 --- a/physical/alicloudoss/alicloudoss.go +++ b/physical/alicloudoss/alicloudoss.go @@ -129,7 +129,6 @@ func (a *AliCloudOSSBackend) Put(ctx context.Context, entry *physical.Entry) err } return bucket.PutObject(entry.Key, bytes.NewReader(entry.Value)) - } // Get is used to fetch an entry diff --git a/physical/alicloudoss/alicloudoss_test.go b/physical/alicloudoss/alicloudoss_test.go index 53128e72f0..ad292da4f6 100644 --- a/physical/alicloudoss/alicloudoss_test.go +++ b/physical/alicloudoss/alicloudoss_test.go @@ -28,7 +28,7 @@ func TestAliCloudOSSBackend(t *testing.T) { t.Fatalf("unable to create test client: %s", err) } - var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() + randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() bucket := fmt.Sprintf("vault-alibaba-testacc-%d", randInt) err = conn.CreateBucket(bucket) diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go index 6530bd2041..9a5ea13b71 100644 --- a/physical/cassandra/cassandra.go +++ b/physical/cassandra/cassandra.go @@ -166,7 +166,7 @@ func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) err return nil } - var tlsConfig = &tls.Config{} + tlsConfig := &tls.Config{} if pemBundlePath, ok := conf["pem_bundle_file"]; ok { pemBundleData, err := ioutil.ReadFile(pemBundlePath) if err != nil { @@ -225,7 +225,8 @@ func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) err } cluster.SslOpts = &gocql.SslOptions{ - Config: tlsConfig.Clone()} + Config: tlsConfig.Clone(), + } return nil } diff --git a/physical/cassandra/cassandra_test.go b/physical/cassandra/cassandra_test.go index f5ba015915..12469889fd 100644 --- a/physical/cassandra/cassandra_test.go +++ b/physical/cassandra/cassandra_test.go @@ -1,13 +1,14 @@ package cassandra import ( + "os" + "reflect" + "testing" + 
log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/testhelpers/cassandra" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" - "os" - "reflect" - "testing" ) func TestCassandraBackend(t *testing.T) { @@ -27,7 +28,6 @@ func TestCassandraBackend(t *testing.T) { "hosts": hosts, "protocol_version": "3", }, logger) - if err != nil { t.Fatalf("Failed to create new backend: %v", err) } @@ -41,7 +41,8 @@ func TestCassandraBackendBuckets(t *testing.T) { "": {"."}, "a": {"."}, "a/b": {".", "a"}, - "a/b/c/d/e": {".", "a", "a/b", "a/b/c", "a/b/c/d"}} + "a/b/c/d/e": {".", "a", "a/b", "a/b/c", "a/b/c/d"}, + } b := &CassandraBackend{} for input, expected := range expectations { diff --git a/physical/cockroachdb/cockroachdb_test.go b/physical/cockroachdb/cockroachdb_test.go index c169869f5a..399b7fc610 100644 --- a/physical/cockroachdb/cockroachdb_test.go +++ b/physical/cockroachdb/cockroachdb_test.go @@ -96,7 +96,6 @@ func TestCockroachDBBackend(t *testing.T) { "connection_url": config.URL().String(), "table": config.TableName, }, logger) - if err != nil { t.Fatalf("Failed to create new backend: %v", err) } diff --git a/physical/cockroachdb/keywords.go b/physical/cockroachdb/keywords.go index b7ecb05d7d..390dc63f8d 100644 --- a/physical/cockroachdb/keywords.go +++ b/physical/cockroachdb/keywords.go @@ -1,440 +1,438 @@ package cockroachdb -var ( - // sqlKeywords is a reference of all of the keywords that we do not allow for use as the table name - // Referenced from: - // https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#identifiers - // -> https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#keywords - // -> https://www.cockroachlabs.com/docs/stable/sql-grammar.html - sqlKeywords = map[string]bool{ - // reserved_keyword - // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#reserved_keyword - "ALL": true, - "ANALYSE": true, - "ANALYZE": true, - "AND": true, - "ANY": true, - "ARRAY": true, - "AS": true, - "ASC": true, - "ASYMMETRIC": true, - "BOTH": true, - "CASE": true, - "CAST": true, - "CHECK": true, - "COLLATE": true, - "COLUMN": true, - "CONCURRENTLY": true, - "CONSTRAINT": true, - "CREATE": true, - "CURRENT_CATALOG": true, - "CURRENT_DATE": true, - "CURRENT_ROLE": true, - "CURRENT_SCHEMA": true, - "CURRENT_TIME": true, - "CURRENT_TIMESTAMP": true, - "CURRENT_USER": true, - "DEFAULT": true, - "DEFERRABLE": true, - "DESC": true, - "DISTINCT": true, - "DO": true, - "ELSE": true, - "END": true, - "EXCEPT": true, - "FALSE": true, - "FETCH": true, - "FOR": true, - "FOREIGN": true, - "FROM": true, - "GRANT": true, - "GROUP": true, - "HAVING": true, - "IN": true, - "INITIALLY": true, - "INTERSECT": true, - "INTO": true, - "LATERAL": true, - "LEADING": true, - "LIMIT": true, - "LOCALTIME": true, - "LOCALTIMESTAMP": true, - "NOT": true, - "NULL": true, - "OFFSET": true, - "ON": true, - "ONLY": true, - "OR": true, - "ORDER": true, - "PLACING": true, - "PRIMARY": true, - "REFERENCES": true, - "RETURNING": true, - "SELECT": true, - "SESSION_USER": true, - "SOME": true, - "SYMMETRIC": true, - "TABLE": true, - "THEN": true, - "TO": true, - "TRAILING": true, - "TRUE": true, - "UNION": true, - "UNIQUE": true, - "USER": true, - "USING": true, - "VARIADIC": true, - "WHEN": true, - "WHERE": true, - "WINDOW": true, - "WITH": true, +// sqlKeywords is a reference of all of the keywords that we do not allow for use as the table name +// Referenced from: +// 
https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#identifiers +// -> https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#keywords +// -> https://www.cockroachlabs.com/docs/stable/sql-grammar.html +var sqlKeywords = map[string]bool{ + // reserved_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#reserved_keyword + "ALL": true, + "ANALYSE": true, + "ANALYZE": true, + "AND": true, + "ANY": true, + "ARRAY": true, + "AS": true, + "ASC": true, + "ASYMMETRIC": true, + "BOTH": true, + "CASE": true, + "CAST": true, + "CHECK": true, + "COLLATE": true, + "COLUMN": true, + "CONCURRENTLY": true, + "CONSTRAINT": true, + "CREATE": true, + "CURRENT_CATALOG": true, + "CURRENT_DATE": true, + "CURRENT_ROLE": true, + "CURRENT_SCHEMA": true, + "CURRENT_TIME": true, + "CURRENT_TIMESTAMP": true, + "CURRENT_USER": true, + "DEFAULT": true, + "DEFERRABLE": true, + "DESC": true, + "DISTINCT": true, + "DO": true, + "ELSE": true, + "END": true, + "EXCEPT": true, + "FALSE": true, + "FETCH": true, + "FOR": true, + "FOREIGN": true, + "FROM": true, + "GRANT": true, + "GROUP": true, + "HAVING": true, + "IN": true, + "INITIALLY": true, + "INTERSECT": true, + "INTO": true, + "LATERAL": true, + "LEADING": true, + "LIMIT": true, + "LOCALTIME": true, + "LOCALTIMESTAMP": true, + "NOT": true, + "NULL": true, + "OFFSET": true, + "ON": true, + "ONLY": true, + "OR": true, + "ORDER": true, + "PLACING": true, + "PRIMARY": true, + "REFERENCES": true, + "RETURNING": true, + "SELECT": true, + "SESSION_USER": true, + "SOME": true, + "SYMMETRIC": true, + "TABLE": true, + "THEN": true, + "TO": true, + "TRAILING": true, + "TRUE": true, + "UNION": true, + "UNIQUE": true, + "USER": true, + "USING": true, + "VARIADIC": true, + "WHEN": true, + "WHERE": true, + "WINDOW": true, + "WITH": true, - // cockroachdb_extra_reserved_keyword - // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#cockroachdb_extra_reserved_keyword - "INDEX": true, - "NOTHING": true, + // cockroachdb_extra_reserved_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#cockroachdb_extra_reserved_keyword + "INDEX": true, + "NOTHING": true, - // type_func_name_keyword - // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#type_func_name_keyword - "COLLATION": true, - "CROSS": true, - "FULL": true, - "INNER": true, - "ILIKE": true, - "IS": true, - "ISNULL": true, - "JOIN": true, - "LEFT": true, - "LIKE": true, - "NATURAL": true, - "NONE": true, - "NOTNULL": true, - "OUTER": true, - "OVERLAPS": true, - "RIGHT": true, - "SIMILAR": true, - "FAMILY": true, + // type_func_name_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#type_func_name_keyword + "COLLATION": true, + "CROSS": true, + "FULL": true, + "INNER": true, + "ILIKE": true, + "IS": true, + "ISNULL": true, + "JOIN": true, + "LEFT": true, + "LIKE": true, + "NATURAL": true, + "NONE": true, + "NOTNULL": true, + "OUTER": true, + "OVERLAPS": true, + "RIGHT": true, + "SIMILAR": true, + "FAMILY": true, - // col_name_keyword - // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#col_name_keyword - "ANNOTATE_TYPE": true, - "BETWEEN": true, - "BIGINT": true, - "BIT": true, - "BOOLEAN": true, - "CHAR": true, - "CHARACTER": true, - "CHARACTERISTICS": true, - "COALESCE": true, - "DEC": true, - "DECIMAL": true, - "EXISTS": true, - "EXTRACT": true, - "EXTRACT_DURATION": true, - "FLOAT": true, - "GREATEST": true, - "GROUPING": true, - "IF": true, - "IFERROR": true, - "IFNULL": true, - "INT": true, - "INTEGER": 
true, - "INTERVAL": true, - "ISERROR": true, - "LEAST": true, - "NULLIF": true, - "NUMERIC": true, - "OUT": true, - "OVERLAY": true, - "POSITION": true, - "PRECISION": true, - "REAL": true, - "ROW": true, - "SMALLINT": true, - "SUBSTRING": true, - "TIME": true, - "TIMETZ": true, - "TIMESTAMP": true, - "TIMESTAMPTZ": true, - "TREAT": true, - "TRIM": true, - "VALUES": true, - "VARBIT": true, - "VARCHAR": true, - "VIRTUAL": true, - "WORK": true, + // col_name_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#col_name_keyword + "ANNOTATE_TYPE": true, + "BETWEEN": true, + "BIGINT": true, + "BIT": true, + "BOOLEAN": true, + "CHAR": true, + "CHARACTER": true, + "CHARACTERISTICS": true, + "COALESCE": true, + "DEC": true, + "DECIMAL": true, + "EXISTS": true, + "EXTRACT": true, + "EXTRACT_DURATION": true, + "FLOAT": true, + "GREATEST": true, + "GROUPING": true, + "IF": true, + "IFERROR": true, + "IFNULL": true, + "INT": true, + "INTEGER": true, + "INTERVAL": true, + "ISERROR": true, + "LEAST": true, + "NULLIF": true, + "NUMERIC": true, + "OUT": true, + "OVERLAY": true, + "POSITION": true, + "PRECISION": true, + "REAL": true, + "ROW": true, + "SMALLINT": true, + "SUBSTRING": true, + "TIME": true, + "TIMETZ": true, + "TIMESTAMP": true, + "TIMESTAMPTZ": true, + "TREAT": true, + "TRIM": true, + "VALUES": true, + "VARBIT": true, + "VARCHAR": true, + "VIRTUAL": true, + "WORK": true, - // unreserved_keyword - // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#unreserved_keyword - "ABORT": true, - "ACTION": true, - "ADD": true, - "ADMIN": true, - "AGGREGATE": true, - "ALTER": true, - "AT": true, - "AUTOMATIC": true, - "AUTHORIZATION": true, - "BACKUP": true, - "BEGIN": true, - "BIGSERIAL": true, - "BLOB": true, - "BOOL": true, - "BUCKET_COUNT": true, - "BUNDLE": true, - "BY": true, - "BYTEA": true, - "BYTES": true, - "CACHE": true, - "CANCEL": true, - "CASCADE": true, - "CHANGEFEED": true, - "CLUSTER": true, - "COLUMNS": true, - "COMMENT": true, - "COMMIT": true, - "COMMITTED": true, - "COMPACT": true, - "COMPLETE": true, - "CONFLICT": true, - "CONFIGURATION": true, - "CONFIGURATIONS": true, - "CONFIGURE": true, - "CONSTRAINTS": true, - "CONVERSION": true, - "COPY": true, - "COVERING": true, - "CREATEROLE": true, - "CUBE": true, - "CURRENT": true, - "CYCLE": true, - "DATA": true, - "DATABASE": true, - "DATABASES": true, - "DATE": true, - "DAY": true, - "DEALLOCATE": true, - "DELETE": true, - "DEFERRED": true, - "DISCARD": true, - "DOMAIN": true, - "DOUBLE": true, - "DROP": true, - "ENCODING": true, - "ENUM": true, - "ESCAPE": true, - "EXCLUDE": true, - "EXECUTE": true, - "EXPERIMENTAL": true, - "EXPERIMENTAL_AUDIT": true, - "EXPERIMENTAL_FINGERPRINTS": true, - "EXPERIMENTAL_RELOCATE": true, - "EXPERIMENTAL_REPLICA": true, - "EXPIRATION": true, - "EXPLAIN": true, - "EXPORT": true, - "EXTENSION": true, - "FILES": true, - "FILTER": true, - "FIRST": true, - "FLOAT4": true, - "FLOAT8": true, - "FOLLOWING": true, - "FORCE_INDEX": true, - "FUNCTION": true, - "GLOBAL": true, - "GRANTS": true, - "GROUPS": true, - "HASH": true, - "HIGH": true, - "HISTOGRAM": true, - "HOUR": true, - "IMMEDIATE": true, - "IMPORT": true, - "INCLUDE": true, - "INCREMENT": true, - "INCREMENTAL": true, - "INDEXES": true, - "INET": true, - "INJECT": true, - "INSERT": true, - "INT2": true, - "INT2VECTOR": true, - "INT4": true, - "INT8": true, - "INT64": true, - "INTERLEAVE": true, - "INVERTED": true, - "ISOLATION": true, - "JOB": true, - "JOBS": true, - "JSON": true, - "JSONB": true, - "KEY": true, - "KEYS": 
true, - "KV": true, - "LANGUAGE": true, - "LAST": true, - "LC_COLLATE": true, - "LC_CTYPE": true, - "LEASE": true, - "LESS": true, - "LEVEL": true, - "LIST": true, - "LOCAL": true, - "LOCKED": true, - "LOGIN": true, - "LOOKUP": true, - "LOW": true, - "MATCH": true, - "MATERIALIZED": true, - "MAXVALUE": true, - "MERGE": true, - "MINUTE": true, - "MINVALUE": true, - "MONTH": true, - "NAMES": true, - "NAN": true, - "NAME": true, - "NEXT": true, - "NO": true, - "NORMAL": true, - "NO_INDEX_JOIN": true, - "NOCREATEROLE": true, - "NOLOGIN": true, - "NOWAIT": true, - "NULLS": true, - "IGNORE_FOREIGN_KEYS": true, - "OF": true, - "OFF": true, - "OID": true, - "OIDS": true, - "OIDVECTOR": true, - "OPERATOR": true, - "OPT": true, - "OPTION": true, - "OPTIONS": true, - "ORDINALITY": true, - "OTHERS": true, - "OVER": true, - "OWNED": true, - "PARENT": true, - "PARTIAL": true, - "PARTITION": true, - "PARTITIONS": true, - "PASSWORD": true, - "PAUSE": true, - "PHYSICAL": true, - "PLAN": true, - "PLANS": true, - "PRECEDING": true, - "PREPARE": true, - "PRESERVE": true, - "PRIORITY": true, - "PUBLIC": true, - "PUBLICATION": true, - "QUERIES": true, - "QUERY": true, - "RANGE": true, - "RANGES": true, - "READ": true, - "RECURSIVE": true, - "REF": true, - "REGCLASS": true, - "REGPROC": true, - "REGPROCEDURE": true, - "REGNAMESPACE": true, - "REGTYPE": true, - "REINDEX": true, - "RELEASE": true, - "RENAME": true, - "REPEATABLE": true, - "REPLACE": true, - "RESET": true, - "RESTORE": true, - "RESTRICT": true, - "RESUME": true, - "REVOKE": true, - "ROLE": true, - "ROLES": true, - "ROLLBACK": true, - "ROLLUP": true, - "ROWS": true, - "RULE": true, - "SETTING": true, - "SETTINGS": true, - "STATUS": true, - "SAVEPOINT": true, - "SCATTER": true, - "SCHEMA": true, - "SCHEMAS": true, - "SCRUB": true, - "SEARCH": true, - "SECOND": true, - "SERIAL": true, - "SERIALIZABLE": true, - "SERIAL2": true, - "SERIAL4": true, - "SERIAL8": true, - "SEQUENCE": true, - "SEQUENCES": true, - "SERVER": true, - "SESSION": true, - "SESSIONS": true, - "SET": true, - "SHARE": true, - "SHOW": true, - "SIMPLE": true, - "SKIP": true, - "SMALLSERIAL": true, - "SNAPSHOT": true, - "SPLIT": true, - "SQL": true, - "START": true, - "STATISTICS": true, - "STDIN": true, - "STORE": true, - "STORED": true, - "STORING": true, - "STRICT": true, - "STRING": true, - "SUBSCRIPTION": true, - "SYNTAX": true, - "SYSTEM": true, - "TABLES": true, - "TEMP": true, - "TEMPLATE": true, - "TEMPORARY": true, - "TESTING_RELOCATE": true, - "TEXT": true, - "TIES": true, - "TRACE": true, - "TRANSACTION": true, - "TRIGGER": true, - "TRUNCATE": true, - "TRUSTED": true, - "TYPE": true, - "THROTTLING": true, - "UNBOUNDED": true, - "UNCOMMITTED": true, - "UNKNOWN": true, - "UNLOGGED": true, - "UNSPLIT": true, - "UNTIL": true, - "UPDATE": true, - "UPSERT": true, - "UUID": true, - "USE": true, - "USERS": true, - "VALID": true, - "VALIDATE": true, - "VALUE": true, - "VARYING": true, - "VIEW": true, - "WITHIN": true, - "WITHOUT": true, - "WRITE": true, - "YEAR": true, - "ZONE": true, - } -) + // unreserved_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#unreserved_keyword + "ABORT": true, + "ACTION": true, + "ADD": true, + "ADMIN": true, + "AGGREGATE": true, + "ALTER": true, + "AT": true, + "AUTOMATIC": true, + "AUTHORIZATION": true, + "BACKUP": true, + "BEGIN": true, + "BIGSERIAL": true, + "BLOB": true, + "BOOL": true, + "BUCKET_COUNT": true, + "BUNDLE": true, + "BY": true, + "BYTEA": true, + "BYTES": true, + "CACHE": true, + "CANCEL": true, + "CASCADE": 
true, + "CHANGEFEED": true, + "CLUSTER": true, + "COLUMNS": true, + "COMMENT": true, + "COMMIT": true, + "COMMITTED": true, + "COMPACT": true, + "COMPLETE": true, + "CONFLICT": true, + "CONFIGURATION": true, + "CONFIGURATIONS": true, + "CONFIGURE": true, + "CONSTRAINTS": true, + "CONVERSION": true, + "COPY": true, + "COVERING": true, + "CREATEROLE": true, + "CUBE": true, + "CURRENT": true, + "CYCLE": true, + "DATA": true, + "DATABASE": true, + "DATABASES": true, + "DATE": true, + "DAY": true, + "DEALLOCATE": true, + "DELETE": true, + "DEFERRED": true, + "DISCARD": true, + "DOMAIN": true, + "DOUBLE": true, + "DROP": true, + "ENCODING": true, + "ENUM": true, + "ESCAPE": true, + "EXCLUDE": true, + "EXECUTE": true, + "EXPERIMENTAL": true, + "EXPERIMENTAL_AUDIT": true, + "EXPERIMENTAL_FINGERPRINTS": true, + "EXPERIMENTAL_RELOCATE": true, + "EXPERIMENTAL_REPLICA": true, + "EXPIRATION": true, + "EXPLAIN": true, + "EXPORT": true, + "EXTENSION": true, + "FILES": true, + "FILTER": true, + "FIRST": true, + "FLOAT4": true, + "FLOAT8": true, + "FOLLOWING": true, + "FORCE_INDEX": true, + "FUNCTION": true, + "GLOBAL": true, + "GRANTS": true, + "GROUPS": true, + "HASH": true, + "HIGH": true, + "HISTOGRAM": true, + "HOUR": true, + "IMMEDIATE": true, + "IMPORT": true, + "INCLUDE": true, + "INCREMENT": true, + "INCREMENTAL": true, + "INDEXES": true, + "INET": true, + "INJECT": true, + "INSERT": true, + "INT2": true, + "INT2VECTOR": true, + "INT4": true, + "INT8": true, + "INT64": true, + "INTERLEAVE": true, + "INVERTED": true, + "ISOLATION": true, + "JOB": true, + "JOBS": true, + "JSON": true, + "JSONB": true, + "KEY": true, + "KEYS": true, + "KV": true, + "LANGUAGE": true, + "LAST": true, + "LC_COLLATE": true, + "LC_CTYPE": true, + "LEASE": true, + "LESS": true, + "LEVEL": true, + "LIST": true, + "LOCAL": true, + "LOCKED": true, + "LOGIN": true, + "LOOKUP": true, + "LOW": true, + "MATCH": true, + "MATERIALIZED": true, + "MAXVALUE": true, + "MERGE": true, + "MINUTE": true, + "MINVALUE": true, + "MONTH": true, + "NAMES": true, + "NAN": true, + "NAME": true, + "NEXT": true, + "NO": true, + "NORMAL": true, + "NO_INDEX_JOIN": true, + "NOCREATEROLE": true, + "NOLOGIN": true, + "NOWAIT": true, + "NULLS": true, + "IGNORE_FOREIGN_KEYS": true, + "OF": true, + "OFF": true, + "OID": true, + "OIDS": true, + "OIDVECTOR": true, + "OPERATOR": true, + "OPT": true, + "OPTION": true, + "OPTIONS": true, + "ORDINALITY": true, + "OTHERS": true, + "OVER": true, + "OWNED": true, + "PARENT": true, + "PARTIAL": true, + "PARTITION": true, + "PARTITIONS": true, + "PASSWORD": true, + "PAUSE": true, + "PHYSICAL": true, + "PLAN": true, + "PLANS": true, + "PRECEDING": true, + "PREPARE": true, + "PRESERVE": true, + "PRIORITY": true, + "PUBLIC": true, + "PUBLICATION": true, + "QUERIES": true, + "QUERY": true, + "RANGE": true, + "RANGES": true, + "READ": true, + "RECURSIVE": true, + "REF": true, + "REGCLASS": true, + "REGPROC": true, + "REGPROCEDURE": true, + "REGNAMESPACE": true, + "REGTYPE": true, + "REINDEX": true, + "RELEASE": true, + "RENAME": true, + "REPEATABLE": true, + "REPLACE": true, + "RESET": true, + "RESTORE": true, + "RESTRICT": true, + "RESUME": true, + "REVOKE": true, + "ROLE": true, + "ROLES": true, + "ROLLBACK": true, + "ROLLUP": true, + "ROWS": true, + "RULE": true, + "SETTING": true, + "SETTINGS": true, + "STATUS": true, + "SAVEPOINT": true, + "SCATTER": true, + "SCHEMA": true, + "SCHEMAS": true, + "SCRUB": true, + "SEARCH": true, + "SECOND": true, + "SERIAL": true, + "SERIALIZABLE": true, + "SERIAL2": true, + "SERIAL4": 
true, + "SERIAL8": true, + "SEQUENCE": true, + "SEQUENCES": true, + "SERVER": true, + "SESSION": true, + "SESSIONS": true, + "SET": true, + "SHARE": true, + "SHOW": true, + "SIMPLE": true, + "SKIP": true, + "SMALLSERIAL": true, + "SNAPSHOT": true, + "SPLIT": true, + "SQL": true, + "START": true, + "STATISTICS": true, + "STDIN": true, + "STORE": true, + "STORED": true, + "STORING": true, + "STRICT": true, + "STRING": true, + "SUBSCRIPTION": true, + "SYNTAX": true, + "SYSTEM": true, + "TABLES": true, + "TEMP": true, + "TEMPLATE": true, + "TEMPORARY": true, + "TESTING_RELOCATE": true, + "TEXT": true, + "TIES": true, + "TRACE": true, + "TRANSACTION": true, + "TRIGGER": true, + "TRUNCATE": true, + "TRUSTED": true, + "TYPE": true, + "THROTTLING": true, + "UNBOUNDED": true, + "UNCOMMITTED": true, + "UNKNOWN": true, + "UNLOGGED": true, + "UNSPLIT": true, + "UNTIL": true, + "UPDATE": true, + "UPSERT": true, + "UUID": true, + "USE": true, + "USERS": true, + "VALID": true, + "VALIDATE": true, + "VALUE": true, + "VARYING": true, + "VIEW": true, + "WITHIN": true, + "WITHOUT": true, + "WRITE": true, + "YEAR": true, + "ZONE": true, +} diff --git a/physical/consul/consul.go b/physical/consul/consul.go index f56d8cfdd5..91ffdf2fff 100644 --- a/physical/consul/consul.go +++ b/physical/consul/consul.go @@ -32,10 +32,12 @@ const ( ) // Verify ConsulBackend satisfies the correct interfaces -var _ physical.Backend = (*ConsulBackend)(nil) -var _ physical.HABackend = (*ConsulBackend)(nil) -var _ physical.Lock = (*ConsulLock)(nil) -var _ physical.Transactional = (*ConsulBackend)(nil) +var ( + _ physical.Backend = (*ConsulBackend)(nil) + _ physical.HABackend = (*ConsulBackend)(nil) + _ physical.Lock = (*ConsulLock)(nil) + _ physical.Transactional = (*ConsulBackend)(nil) +) // ConsulBackend is a physical backend that stores data at specific // prefix within Consul. It is used for most production situations as diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go index 8fbfed413a..f5bf6b77f0 100644 --- a/physical/couchdb/couchdb.go +++ b/physical/couchdb/couchdb.go @@ -28,9 +28,11 @@ type CouchDBBackend struct { } // Verify CouchDBBackend satisfies the correct interfaces -var _ physical.Backend = (*CouchDBBackend)(nil) -var _ physical.PseudoTransactional = (*CouchDBBackend)(nil) -var _ physical.PseudoTransactional = (*TransactionalCouchDBBackend)(nil) +var ( + _ physical.Backend = (*CouchDBBackend)(nil) + _ physical.PseudoTransactional = (*CouchDBBackend)(nil) + _ physical.PseudoTransactional = (*TransactionalCouchDBBackend)(nil) +) type couchDBClient struct { endpoint string diff --git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go index ff5f7a2bcf..3f79ef781c 100644 --- a/physical/dynamodb/dynamodb.go +++ b/physical/dynamodb/dynamodb.go @@ -73,9 +73,11 @@ const ( ) // Verify DynamoDBBackend satisfies the correct interfaces -var _ physical.Backend = (*DynamoDBBackend)(nil) -var _ physical.HABackend = (*DynamoDBBackend)(nil) -var _ physical.Lock = (*DynamoDBLock)(nil) +var ( + _ physical.Backend = (*DynamoDBBackend)(nil) + _ physical.HABackend = (*DynamoDBBackend)(nil) + _ physical.Lock = (*DynamoDBLock)(nil) +) // DynamoDBBackend is a physical backend that stores data in // a DynamoDB table. 
It can be run in high-availability mode @@ -177,7 +179,7 @@ func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Bac if dynamodbMaxRetryString == "" { dynamodbMaxRetryString = conf["dynamodb_max_retries"] } - var dynamodbMaxRetry = aws.UseServiceDefaultRetries + dynamodbMaxRetry := aws.UseServiceDefaultRetries if dynamodbMaxRetryString != "" { var err error dynamodbMaxRetry, err = strconv.Atoi(dynamodbMaxRetryString) diff --git a/physical/dynamodb/dynamodb_test.go b/physical/dynamodb/dynamodb_test.go index 5236ce6ed0..ae6722e245 100644 --- a/physical/dynamodb/dynamodb_test.go +++ b/physical/dynamodb/dynamodb_test.go @@ -48,7 +48,7 @@ func TestDynamoDBBackend(t *testing.T) { conn := dynamodb.New(awsSession) - var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() + randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt) defer func() { @@ -138,7 +138,7 @@ func TestDynamoDBHABackend(t *testing.T) { conn := dynamodb.New(awsSession) - var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() + randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt) defer func() { diff --git a/physical/etcd/etcd2.go b/physical/etcd/etcd2.go index b483ff2895..d329a92b36 100644 --- a/physical/etcd/etcd2.go +++ b/physical/etcd/etcd2.go @@ -57,9 +57,11 @@ type Etcd2Backend struct { } // Verify Etcd2Backend satisfies the correct interfaces -var _ physical.Backend = (*Etcd2Backend)(nil) -var _ physical.HABackend = (*Etcd2Backend)(nil) -var _ physical.Lock = (*Etcd2Lock)(nil) +var ( + _ physical.Backend = (*Etcd2Backend)(nil) + _ physical.HABackend = (*Etcd2Backend)(nil) + _ physical.Lock = (*Etcd2Lock)(nil) +) func newEtcd2Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) { // Get the etcd path form the configuration. diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go index 533f220cdc..942eaa5c93 100644 --- a/physical/etcd/etcd3.go +++ b/physical/etcd/etcd3.go @@ -38,9 +38,11 @@ type EtcdBackend struct { } // Verify EtcdBackend satisfies the correct interfaces -var _ physical.Backend = (*EtcdBackend)(nil) -var _ physical.HABackend = (*EtcdBackend)(nil) -var _ physical.Lock = (*EtcdLock)(nil) +var ( + _ physical.Backend = (*EtcdBackend)(nil) + _ physical.HABackend = (*EtcdBackend)(nil) + _ physical.Lock = (*EtcdLock)(nil) +) // newEtcd3Backend constructs a etcd3 backend. 
func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) { @@ -358,7 +360,6 @@ func (c *EtcdLock) Value() (bool, string, error) { resp, err := c.etcd.Get(ctx, c.prefix, clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend)) - if err != nil { return false, "", err } diff --git a/physical/foundationdb/foundationdb.go b/physical/foundationdb/foundationdb.go index a4e2f10a60..ad43a487d5 100644 --- a/physical/foundationdb/foundationdb.go +++ b/physical/foundationdb/foundationdb.go @@ -3,16 +3,15 @@ package foundationdb import ( + "bytes" "context" + "encoding/binary" "fmt" "strconv" "strings" "sync" "time" - "bytes" - "encoding/binary" - log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" @@ -53,10 +52,12 @@ var ( ) // Verify FDBBackend satisfies the correct interfaces -var _ physical.Backend = (*FDBBackend)(nil) -var _ physical.Transactional = (*FDBBackend)(nil) -var _ physical.HABackend = (*FDBBackend)(nil) -var _ physical.Lock = (*FDBBackendLock)(nil) +var ( + _ physical.Backend = (*FDBBackend)(nil) + _ physical.Transactional = (*FDBBackend)(nil) + _ physical.HABackend = (*FDBBackend)(nil) + _ physical.Lock = (*FDBBackendLock)(nil) +) // FDBBackend is a physical backend that stores data at a specific // prefix within FoundationDB. @@ -424,7 +425,6 @@ func (f *FDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) return nil, nil }) - if err != nil { return errwrap.Wrapf("transaction failed: {{err}}", err) } @@ -477,7 +477,6 @@ func (f *FDBBackend) Get(ctx context.Context, key string) (*physical.Entry, erro return value, nil }) - if err != nil { return nil, errwrap.Wrapf(fmt.Sprintf("get failed for item %s: {{err}}", key), err) } @@ -551,7 +550,6 @@ func (f *FDBBackend) List(ctx context.Context, prefix string) ([]string, error) return dirList, nil }) - if err != nil { return nil, errwrap.Wrapf(fmt.Sprintf("could not list prefix %s: {{err}}", prefix), err) } @@ -690,7 +688,6 @@ func (fl *FDBBackendLock) acquireTryLock(acquired chan struct{}, errors chan err return true, nil }) - if err != nil { errors <- err return false, err @@ -752,7 +749,6 @@ func (fl *FDBBackendLock) maintainLock(lost <-chan struct{}) { return nil, nil }) - if err != nil { fl.f.logger.Error("lock maintain", "error", err) } @@ -789,7 +785,6 @@ func (fl *FDBBackendLock) watchLock(lost chan struct{}) { return future, nil }) - if err != nil { fl.f.logger.Error("lock watch", "error", err) break @@ -859,7 +854,6 @@ func (fl *FDBBackendLock) Unlock() error { return nil, nil }) - if err != nil { return errwrap.Wrapf("unlock failed: {{err}}", err) } @@ -876,7 +870,6 @@ func (fl *FDBBackendLock) Value() (bool, string, error) { return tupleContent, nil }) - if err != nil { return false, "", errwrap.Wrapf(fmt.Sprintf("get lock value failed for lock %s: {{err}}", fl.key), err) } diff --git a/physical/gcs/gcs_ha.go b/physical/gcs/gcs_ha.go index 84d2dcb3a9..39f30f7e0b 100644 --- a/physical/gcs/gcs_ha.go +++ b/physical/gcs/gcs_ha.go @@ -17,8 +17,10 @@ import ( ) // Verify Backend satisfies the correct interfaces -var _ physical.HABackend = (*Backend)(nil) -var _ physical.Lock = (*Lock)(nil) +var ( + _ physical.HABackend = (*Backend)(nil) + _ physical.Lock = (*Lock)(nil) +) const ( // LockRenewInterval is the time to wait between lock renewals. 
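Several of the storage backends above get the same treatment for their compile-time interface assertions: adjacent top-level var declarations are folded into a single parenthesized block (the const pairs in the test helpers are grouped the same way). A minimal, hypothetical sketch of the pattern, using stand-in interfaces rather than the real physical.Backend ones:

package demo

// Hypothetical sketch of the declaration grouping applied above.
import "context"

type Backend interface {
    Get(ctx context.Context, key string) ([]byte, error)
}

type HABackend interface {
    LockWith(key, value string) error
}

type demoBackend struct{}

func (d *demoBackend) Get(_ context.Context, _ string) ([]byte, error) { return nil, nil }

func (d *demoBackend) LockWith(_, _ string) error { return nil }

// was two separate statements:
//   var _ Backend = (*demoBackend)(nil)
//   var _ HABackend = (*demoBackend)(nil)
var (
    _ Backend   = (*demoBackend)(nil)
    _ HABackend = (*demoBackend)(nil)
)
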
diff --git a/physical/manta/manta.go b/physical/manta/manta.go index 4b687694d1..a02bf3d479 100644 --- a/physical/manta/manta.go +++ b/physical/manta/manta.go @@ -233,8 +233,8 @@ func (m *MantaBackend) List(ctx context.Context, prefix string) ([]string, error } } - //We need to check to see if there is something more than just the `value` file - //if the length of the children is: + // We need to check to see if there is something more than just the `value` file + // if the length of the children is: // > 1 and includes the value `index` then we need to add foo and foo/ // = 1 and the value is `index` then we need to add foo // = 1 and the value is not `index` then we need to add foo/ diff --git a/physical/mssql/mssql_test.go b/physical/mssql/mssql_test.go index ea723a27c0..f447b0355e 100644 --- a/physical/mssql/mssql_test.go +++ b/physical/mssql/mssql_test.go @@ -46,7 +46,6 @@ func TestMSSQLBackend(t *testing.T) { "username": username, "password": password, }, logger) - if err != nil { t.Fatalf("Failed to create new backend: %v", err) } @@ -98,7 +97,6 @@ func TestMSSQLBackend_schema(t *testing.T) { "username": username, "password": password, }, logger) - if err != nil { t.Fatalf("Failed to create new backend: %v", err) } diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go index eb83923dd9..41393977b0 100644 --- a/physical/mysql/mysql.go +++ b/physical/mysql/mysql.go @@ -28,9 +28,11 @@ import ( ) // Verify MySQLBackend satisfies the correct interfaces -var _ physical.Backend = (*MySQLBackend)(nil) -var _ physical.HABackend = (*MySQLBackend)(nil) -var _ physical.Lock = (*MySQLHALock)(nil) +var ( + _ physical.Backend = (*MySQLBackend)(nil) + _ physical.HABackend = (*MySQLBackend)(nil) + _ physical.Lock = (*MySQLHALock)(nil) +) // Unreserved tls key // Reserved values are "true", "false", "skip-verify" @@ -103,7 +105,6 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen // Check table exists var tableExist bool tableRows, err := db.Query("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = ? AND TABLE_SCHEMA = ?", table, database) - if err != nil { return nil, errwrap.Wrapf("failed to check mysql table exist: {{err}}", err) } @@ -148,7 +149,6 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen // Check table exists var lockTableExist bool lockTableRows, err := db.Query("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = ? 
AND TABLE_SCHEMA = ?", locktable, database) - if err != nil { return nil, errwrap.Wrapf("failed to check mysql table exist: {{err}}", err) } @@ -514,7 +514,6 @@ func (i *MySQLHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { func (i *MySQLHALock) attemptLock(key, value string, didLock chan struct{}, failLock chan error, releaseCh chan bool) { lock, err := NewMySQLLock(i.in, i.logger, key, value) - if err != nil { failLock <- err return diff --git a/physical/mysql/mysql_test.go b/physical/mysql/mysql_test.go index 27d596b95e..86373e9162 100644 --- a/physical/mysql/mysql_test.go +++ b/physical/mysql/mysql_test.go @@ -59,6 +59,7 @@ func TestMySQLPlaintextCatch(t *testing.T) { t.Fatalf("No warning of plaintext credentials occurred") } } + func TestMySQLBackend(t *testing.T) { address := os.Getenv("MYSQL_ADDR") if address == "" { @@ -90,7 +91,6 @@ func TestMySQLBackend(t *testing.T) { "plaintext_connection_allowed": "true", "max_connection_lifetime": "1", }, logger) - if err != nil { t.Fatalf("Failed to create new backend: %v", err) } diff --git a/physical/oci/oci_ha.go b/physical/oci/oci_ha.go index 5ae94ce675..9fe3012810 100644 --- a/physical/oci/oci_ha.go +++ b/physical/oci/oci_ha.go @@ -24,8 +24,10 @@ import ( // over high availability of the primary instance // Verify Backend satisfies the correct interfaces -var _ physical.HABackend = (*Backend)(nil) -var _ physical.Lock = (*Lock)(nil) +var ( + _ physical.HABackend = (*Backend)(nil) + _ physical.Lock = (*Lock)(nil) +) const ( // LockRenewInterval is the time to wait between lock renewals. diff --git a/physical/postgresql/postgresql.go b/physical/postgresql/postgresql.go index 596b6c519b..669aba98da 100644 --- a/physical/postgresql/postgresql.go +++ b/physical/postgresql/postgresql.go @@ -43,8 +43,10 @@ var _ physical.Backend = (*PostgreSQLBackend)(nil) // With distinction using central postgres clock, hereby avoiding // possible issues with multiple clocks // -var _ physical.HABackend = (*PostgreSQLBackend)(nil) -var _ physical.Lock = (*PostgreSQLLock)(nil) +var ( + _ physical.HABackend = (*PostgreSQLBackend)(nil) + _ physical.Lock = (*PostgreSQLLock)(nil) +) // PostgreSQL Backend is a physical backend that stores data // within a PostgreSQL database. 
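Two more recurring rewrites are visible in the backend files above: an in-function `var x = expr` becomes a short variable declaration `x := expr`, and the blank line that used to separate an assignment from its `if err != nil` check is removed. A small, hypothetical example (not part of the patch):

package main

// Hypothetical sketch of the short-declaration and blank-line rewrites above.
import (
    "fmt"
    "math/rand"
    "os"
    "time"
)

func main() {
    // was: var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
    randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int()

    f, err := os.CreateTemp("", fmt.Sprintf("demo-%d-", randInt))
    if err != nil { // no blank line between the assignment and this check
        fmt.Println("create failed:", err)
        return
    }
    defer f.Close()
    defer os.Remove(f.Name())

    fmt.Println("created", f.Name())
}
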
diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go index 131fc1f516..a2c4f2eee2 100644 --- a/physical/postgresql/postgresql_test.go +++ b/physical/postgresql/postgresql_test.go @@ -43,7 +43,6 @@ func TestPostgreSQLBackend(t *testing.T) { "table": table, "ha_enabled": hae, }, logger) - if err != nil { t.Fatalf("Failed to create new backend: %v", err) } @@ -53,7 +52,6 @@ func TestPostgreSQLBackend(t *testing.T) { "table": table, "ha_enabled": hae, }, logger) - if err != nil { t.Fatalf("Failed to create new backend: %v", err) } @@ -119,7 +117,7 @@ func TestConnectionURL(t *testing.T) { conf map[string]string } - var cases = map[string]struct { + cases := map[string]struct { want string input input }{ @@ -232,7 +230,7 @@ func attemptLockTTLTest(t *testing.T, ha physical.HABackend, tries int) bool { } if !held { if tries < maxTries && time.Since(lockTime) > (time.Second*time.Duration(lockTTL)) { - //Our test environment is slow enough that we failed this, retry + // Our test environment is slow enough that we failed this, retry return false } t.Fatalf("should be held") @@ -278,7 +276,7 @@ func attemptLockTTLTest(t *testing.T, ha physical.HABackend, tries int) bool { } if !held { if tries < maxTries && time.Since(lockTime) > (time.Second*time.Duration(lockTTL)) { - //Our test environment is slow enough that we failed this, retry + // Our test environment is slow enough that we failed this, retry return false } t.Fatalf("should be held") diff --git a/physical/raft/chunking_test.go b/physical/raft/chunking_test.go index 27d7b77969..23b5f8fb0d 100644 --- a/physical/raft/chunking_test.go +++ b/physical/raft/chunking_test.go @@ -232,5 +232,4 @@ func TestRaft_Chunking_AppliedIndex(t *testing.T) { t.Fatal("value is corrupt") } } - } diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go index 6fdaf4b8f8..4c043c817b 100644 --- a/physical/raft/fsm.go +++ b/physical/raft/fsm.go @@ -46,10 +46,12 @@ var ( ) // Verify FSM satisfies the correct interfaces -var _ physical.Backend = (*FSM)(nil) -var _ physical.Transactional = (*FSM)(nil) -var _ raft.FSM = (*FSM)(nil) -var _ raft.BatchingFSM = (*FSM)(nil) +var ( + _ physical.Backend = (*FSM)(nil) + _ physical.Transactional = (*FSM)(nil) + _ raft.FSM = (*FSM)(nil) + _ raft.BatchingFSM = (*FSM)(nil) +) type restoreCallback func(context.Context) error @@ -94,7 +96,6 @@ type FSM struct { // NewFSM constructs a FSM using the given directory func NewFSM(path string, localID string, logger log.Logger) (*FSM, error) { - // Initialize the latest term, index, and config values latestTerm := new(uint64) latestIndex := new(uint64) @@ -154,7 +155,7 @@ func (f *FSM) openDBFile(dbPath string) error { return errors.New("can not open empty filename") } - boltDB, err := bolt.Open(dbPath, 0666, &bolt.Options{Timeout: 1 * time.Second}) + boltDB, err := bolt.Open(dbPath, 0o666, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { return err } @@ -444,7 +445,6 @@ func (f *FSM) Get(ctx context.Context, path string) (*physical.Entry, error) { var found bool err := f.db.View(func(tx *bolt.Tx) error { - value := tx.Bucket(dataBucketName).Get([]byte(path)) if value != nil { found = true diff --git a/physical/raft/fsm_test.go b/physical/raft/fsm_test.go index 7df24bbbc4..75a7ffd872 100644 --- a/physical/raft/fsm_test.go +++ b/physical/raft/fsm_test.go @@ -52,7 +52,7 @@ func TestFSM_Batching(t *testing.T) { Type: raft.LogConfiguration, Data: raft.EncodeConfiguration(raft.Configuration{ Servers: []raft.Server{ - raft.Server{ + { Address: 
raft.ServerAddress("test"), ID: raft.ServerID("test"), }, diff --git a/physical/raft/logstore/bolt_store.go b/physical/raft/logstore/bolt_store.go index c6690533c4..6b46f7d684 100644 --- a/physical/raft/logstore/bolt_store.go +++ b/physical/raft/logstore/bolt_store.go @@ -10,7 +10,7 @@ import ( const ( // Permissions to use on the db file. This is only used if the // database file does not exist and needs to be created. - dbFileMode = 0600 + dbFileMode = 0o600 ) var ( diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 2d667543ff..b0a086ab14 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -41,10 +41,12 @@ const EnvVaultRaftNodeID = "VAULT_RAFT_NODE_ID" const EnvVaultRaftPath = "VAULT_RAFT_PATH" // Verify RaftBackend satisfies the correct interfaces -var _ physical.Backend = (*RaftBackend)(nil) -var _ physical.Transactional = (*RaftBackend)(nil) -var _ physical.HABackend = (*RaftBackend)(nil) -var _ physical.Lock = (*RaftLock)(nil) +var ( + _ physical.Backend = (*RaftBackend)(nil) + _ physical.Transactional = (*RaftBackend)(nil) + _ physical.HABackend = (*RaftBackend)(nil) + _ physical.Lock = (*RaftLock)(nil) +) var ( // raftLogCacheSize is the maximum number of logs to cache in-memory. @@ -270,7 +272,7 @@ func EnsurePath(path string, dir bool) error { if !dir { path = filepath.Dir(path) } - return os.MkdirAll(path, 0755) + return os.MkdirAll(path, 0o755) } // NewRaftBackend constructs a RaftBackend using the given directory @@ -318,7 +320,7 @@ func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend return nil, err } - if err := ioutil.WriteFile(filepath.Join(path, "node-id"), []byte(id), 0600); err != nil { + if err := ioutil.WriteFile(filepath.Join(path, "node-id"), []byte(id), 0o600); err != nil { return nil, err } @@ -1112,7 +1114,7 @@ func (b *RaftBackend) RestoreSnapshot(ctx context.Context, metadata raft.Snapsho // snapshot applied to a quorum of nodes. 
command := &LogData{ Operations: []*LogOperation{ - &LogOperation{ + { OpType: restoreCallbackOp, }, }, @@ -1131,7 +1133,7 @@ func (b *RaftBackend) Delete(ctx context.Context, path string) error { defer metrics.MeasureSince([]string{"raft-storage", "delete"}, time.Now()) command := &LogData{ Operations: []*LogOperation{ - &LogOperation{ + { OpType: deleteOp, Key: path, }, @@ -1177,7 +1179,7 @@ func (b *RaftBackend) Put(ctx context.Context, entry *physical.Entry) error { defer metrics.MeasureSince([]string{"raft-storage", "put"}, time.Now()) command := &LogData{ Operations: []*LogOperation{ - &LogOperation{ + { OpType: putOp, Key: entry.Key, Value: entry.Value, @@ -1388,7 +1390,6 @@ func (l *RaftLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { case <-stopCh: return nil, nil } - } l.b.l.RLock() @@ -1406,7 +1407,7 @@ func (l *RaftLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { if l.b.raft.State() == raft.Leader { err := l.b.applyLog(context.Background(), &LogData{ Operations: []*LogOperation{ - &LogOperation{ + { OpType: putOp, Key: l.key, Value: l.value, @@ -1430,7 +1431,7 @@ func (l *RaftLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { l.b.l.RLock() err := l.b.applyLog(context.Background(), &LogData{ Operations: []*LogOperation{ - &LogOperation{ + { OpType: putOp, Key: l.key, Value: l.value, diff --git a/physical/raft/raft_autopilot.go b/physical/raft/raft_autopilot.go index 39b983e60c..540030f03b 100644 --- a/physical/raft/raft_autopilot.go +++ b/physical/raft/raft_autopilot.go @@ -469,7 +469,7 @@ func (b *RaftBackend) startFollowerHeartbeatTracker() { tickerCh := b.followerHeartbeatTicker.C b.l.RUnlock() - for _ = range tickerCh { + for range tickerCh { b.l.RLock() if b.autopilotConfig.CleanupDeadServers && b.autopilotConfig.DeadServerLastContactThreshold != 0 { b.followerStates.l.RLock() diff --git a/physical/raft/raft_test.go b/physical/raft/raft_test.go index 8f5af54d54..0b47fcc7c0 100644 --- a/physical/raft/raft_test.go +++ b/physical/raft/raft_test.go @@ -184,7 +184,6 @@ func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB, dataOnly bool) error { return nil }) - if err != nil { t.Fatal(err) } @@ -259,7 +258,7 @@ func TestRaft_TransactionalBackend_LargeValue(t *testing.T) { rand.Read(value) txns := []*physical.TxnEntry{ - &physical.TxnEntry{ + { Operation: physical.PutOperation, Entry: &physical.Entry{ Key: "foo", @@ -391,15 +390,15 @@ func TestRaft_Recovery(t *testing.T) { if err != nil { t.Fatal(err) } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir1, raftState), "peers.json"), peersJSONBytes, 0644) + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir1, raftState), "peers.json"), peersJSONBytes, 0o644) if err != nil { t.Fatal(err) } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir2, raftState), "peers.json"), peersJSONBytes, 0644) + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir2, raftState), "peers.json"), peersJSONBytes, 0o644) if err != nil { t.Fatal(err) } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir4, raftState), "peers.json"), peersJSONBytes, 0644) + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir4, raftState), "peers.json"), peersJSONBytes, 0o644) if err != nil { t.Fatal(err) } @@ -499,7 +498,6 @@ func TestRaft_Backend_Performance(t *testing.T) { if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout { t.Fatalf("bad config: %v", localConfig) } - } func BenchmarkDB_Puts(b *testing.B) { diff --git a/physical/raft/snapshot.go b/physical/raft/snapshot.go index 
9eb9b1e69a..f1503cd41c 100644 --- a/physical/raft/snapshot.go +++ b/physical/raft/snapshot.go @@ -85,7 +85,7 @@ func NewBoltSnapshotStore(base string, logger log.Logger, fsm *FSM) (*BoltSnapsh // Ensure our path exists path := filepath.Join(base, snapPath) - if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + if err := os.MkdirAll(path, 0o755); err != nil && !os.IsExist(err) { return nil, fmt.Errorf("snapshot path not accessible: %v", err) } @@ -210,7 +210,7 @@ func (f *BoltSnapshotStore) getMetaFromDB(id string) (*raft.SnapshotMeta, error) } filename := filepath.Join(f.path, id, databaseFilename) - boltDB, err := bolt.Open(filename, 0666, &bolt.Options{Timeout: 1 * time.Second}) + boltDB, err := bolt.Open(filename, 0o666, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { return nil, err } @@ -323,14 +323,14 @@ func (s *BoltSnapshotSink) writeBoltDBFile() error { s.logger.Info("creating new snapshot", "path", path) // Make the directory - if err := os.MkdirAll(path, 0755); err != nil { + if err := os.MkdirAll(path, 0o755); err != nil { s.logger.Error("failed to make snapshot directory", "error", err) return err } // Create the BoltDB file dbPath := filepath.Join(path, databaseFilename) - boltDB, err := bolt.Open(dbPath, 0666, &bolt.Options{Timeout: 1 * time.Second}) + boltDB, err := bolt.Open(dbPath, 0o666, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { return err } diff --git a/physical/raft/snapshot_test.go b/physical/raft/snapshot_test.go index afbc3b3047..b655fc0906 100644 --- a/physical/raft/snapshot_test.go +++ b/physical/raft/snapshot_test.go @@ -136,7 +136,6 @@ func TestRaft_Snapshot_Loading(t *testing.T) { if !bytes.Equal(computed1, computed3) { t.Fatal("hashes did not match") } - } func TestRaft_Snapshot_Index(t *testing.T) { @@ -892,7 +891,7 @@ func TestBoltSnapshotStore_BadPerm(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - if err = os.Chmod(dir2, 000); err != nil { + if err = os.Chmod(dir2, 0o00); err != nil { t.Fatalf("err: %s", err) } defer os.Chmod(dir2, 777) // Set perms back for delete diff --git a/physical/s3/s3.go b/physical/s3/s3.go index 4774464799..7c4822a3a3 100644 --- a/physical/s3/s3.go +++ b/physical/s3/s3.go @@ -182,7 +182,6 @@ func (s *S3Backend) Put(ctx context.Context, entry *physical.Entry) error { } _, err := s.client.PutObject(putObjectInput) - if err != nil { return err } @@ -257,7 +256,6 @@ func (s *S3Backend) Delete(ctx context.Context, key string) error { Bucket: aws.String(s.bucket), Key: aws.String(key), }) - if err != nil { return err } @@ -315,7 +313,6 @@ func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) { } return true }) - if err != nil { return nil, err } diff --git a/physical/s3/s3_test.go b/physical/s3/s3_test.go index b611b213aa..f083d787f1 100644 --- a/physical/s3/s3_test.go +++ b/physical/s3/s3_test.go @@ -62,7 +62,7 @@ func DoS3BackendTest(t *testing.T, kmsKeyId string) { } s3conn := s3.New(sess) - var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() + randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() bucket := fmt.Sprintf("vault-s3-testacc-%d", randInt) _, err = s3conn.CreateBucket(&s3.CreateBucketInput{ diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go index 3a9d933b8a..8e4e13265f 100644 --- a/physical/spanner/spanner.go +++ b/physical/spanner/spanner.go @@ -24,8 +24,10 @@ import ( ) // Verify Backend satisfies the correct interfaces -var _ physical.Backend = (*Backend)(nil) -var _ physical.Transactional = 
(*Backend)(nil) +var ( + _ physical.Backend = (*Backend)(nil) + _ physical.Transactional = (*Backend)(nil) +) const ( // envDatabase is the name of the environment variable to search for the diff --git a/physical/spanner/spanner_ha.go b/physical/spanner/spanner_ha.go index cc2586f5f2..ab9c9a855c 100644 --- a/physical/spanner/spanner_ha.go +++ b/physical/spanner/spanner_ha.go @@ -16,8 +16,10 @@ import ( ) // Verify Backend satisfies the correct interfaces -var _ physical.HABackend = (*Backend)(nil) -var _ physical.Lock = (*Lock)(nil) +var ( + _ physical.HABackend = (*Backend)(nil) + _ physical.Lock = (*Lock)(nil) +) const ( // LockRenewInterval is the time to wait between lock renewals. diff --git a/physical/swift/swift.go b/physical/swift/swift.go index dc15564fa6..260a5bedc5 100644 --- a/physical/swift/swift.go +++ b/physical/swift/swift.go @@ -160,7 +160,6 @@ func (s *SwiftBackend) Put(ctx context.Context, entry *physical.Entry) error { defer s.permitPool.Release() err := s.client.ObjectPutBytes(s.container, entry.Key, entry.Value, "") - if err != nil { return err } @@ -175,9 +174,9 @@ func (s *SwiftBackend) Get(ctx context.Context, key string) (*physical.Entry, er s.permitPool.Acquire() defer s.permitPool.Release() - //Do a list of names with the key first since eventual consistency means - //it might be deleted, but a node might return a read of bytes which fails - //the physical test + // Do a list of names with the key first since eventual consistency means + // it might be deleted, but a node might return a read of bytes which fails + // the physical test list, err := s.client.ObjectNames(s.container, &swift.ObjectsOpts{Prefix: key}) if err != nil { return nil, err diff --git a/physical/zookeeper/zookeeper.go b/physical/zookeeper/zookeeper.go index d94e7e0d3d..47a0fb3eb7 100644 --- a/physical/zookeeper/zookeeper.go +++ b/physical/zookeeper/zookeeper.go @@ -32,9 +32,11 @@ const ( ) // Verify ZooKeeperBackend satisfies the correct interfaces -var _ physical.Backend = (*ZooKeeperBackend)(nil) -var _ physical.HABackend = (*ZooKeeperBackend)(nil) -var _ physical.Lock = (*ZooKeeperHALock)(nil) +var ( + _ physical.Backend = (*ZooKeeperBackend)(nil) + _ physical.HABackend = (*ZooKeeperBackend)(nil) + _ physical.Lock = (*ZooKeeperHALock)(nil) +) // ZooKeeperBackend is a physical backend that stores data at specific // prefix within ZooKeeper. 
It is used in production situations as diff --git a/physical/zookeeper/zookeeper_test.go b/physical/zookeeper/zookeeper_test.go index 3edc55f104..baaa41fdbf 100644 --- a/physical/zookeeper/zookeeper_test.go +++ b/physical/zookeeper/zookeeper_test.go @@ -20,7 +20,6 @@ func TestZooKeeperBackend(t *testing.T) { } client, _, err := zk.Connect([]string{addr}, time.Second) - if err != nil { t.Fatalf("err: %v", err) } @@ -65,7 +64,6 @@ func TestZooKeeperHABackend(t *testing.T) { } client, _, err := zk.Connect([]string{addr}, time.Second) - if err != nil { t.Fatalf("err: %v", err) } diff --git a/plugins/database/hana/hana.go b/plugins/database/hana/hana.go index 338301e562..892c6e9bd3 100644 --- a/plugins/database/hana/hana.go +++ b/plugins/database/hana/hana.go @@ -101,7 +101,6 @@ func (h *HANA) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (respon credsutil.Separator("_"), credsutil.ToUpper(), ) - if err != nil { return dbplugin.NewUserResponse{}, err } diff --git a/plugins/database/influxdb/connection_producer.go b/plugins/database/influxdb/connection_producer.go index 535bc5386f..b1d3ea5598 100644 --- a/plugins/database/influxdb/connection_producer.go +++ b/plugins/database/influxdb/connection_producer.go @@ -23,7 +23,7 @@ type influxdbConnectionProducer struct { Host string `json:"host" structs:"host" mapstructure:"host"` Username string `json:"username" structs:"username" mapstructure:"username"` Password string `json:"password" structs:"password" mapstructure:"password"` - Port string `json:"port" structs:"port" mapstructure:"port"` //default to 8086 + Port string `json:"port" structs:"port" mapstructure:"port"` // default to 8086 TLS bool `json:"tls" structs:"tls" mapstructure:"tls"` InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"` ConnectTimeoutRaw interface{} `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"` diff --git a/plugins/database/influxdb/influxdb_test.go b/plugins/database/influxdb/influxdb_test.go index 636e8175eb..8669961741 100644 --- a/plugins/database/influxdb/influxdb_test.go +++ b/plugins/database/influxdb/influxdb_test.go @@ -64,7 +64,8 @@ func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { "INFLUXDB_DB=vault", "INFLUXDB_ADMIN_USER=" + c.Username, "INFLUXDB_ADMIN_PASSWORD=" + c.Password, - "INFLUXDB_HTTP_AUTH_ENABLED=true"}, + "INFLUXDB_HTTP_AUTH_ENABLED=true", + }, Ports: []string{"8086/tcp"}, }) if err != nil { diff --git a/plugins/database/mongodb/connection_producer_test.go b/plugins/database/mongodb/connection_producer_test.go index 89880a6d1d..c39914cc53 100644 --- a/plugins/database/mongodb/connection_producer_test.go +++ b/plugins/database/mongodb/connection_producer_test.go @@ -48,9 +48,9 @@ func TestInit_clientTLS(t *testing.T) { certhelpers.Parent(caCert), ) - writeFile(t, paths.Join(confDir, "ca.pem"), caCert.CombinedPEM(), 0644) - writeFile(t, paths.Join(confDir, "server.pem"), serverCert.CombinedPEM(), 0644) - writeFile(t, paths.Join(confDir, "client.pem"), clientCert.CombinedPEM(), 0644) + writeFile(t, paths.Join(confDir, "ca.pem"), caCert.CombinedPEM(), 0o644) + writeFile(t, paths.Join(confDir, "server.pem"), serverCert.CombinedPEM(), 0o644) + writeFile(t, paths.Join(confDir, "client.pem"), clientCert.CombinedPEM(), 0o644) // ////////////////////////////////////////////////////// // Set up Mongo config file @@ -62,7 +62,7 @@ net: CAFile: /etc/mongo/ca.pem allowInvalidHostnames: true` - writeFile(t, paths.Join(confDir, "mongod.conf"), []byte(rawConf), 0644) 
+ writeFile(t, paths.Join(confDir, "mongod.conf"), []byte(rawConf), 0o644) // ////////////////////////////////////////////////////// // Start Mongo container diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go index 6993242f6e..bfed5fee1f 100644 --- a/plugins/database/mssql/mssql.go +++ b/plugins/database/mssql/mssql.go @@ -392,6 +392,7 @@ BEGIN DROP LOGIN [%s] END ` + const alterLoginSQL = ` ALTER LOGIN [{{username}}] WITH PASSWORD = '{{password}}' ` diff --git a/plugins/database/mysql/connection_producer_test.go b/plugins/database/mysql/connection_producer_test.go index 5fcb040c38..eacf18fabe 100644 --- a/plugins/database/mysql/connection_producer_test.go +++ b/plugins/database/mysql/connection_producer_test.go @@ -95,10 +95,10 @@ func TestInit_clientTLS(t *testing.T) { certhelpers.Parent(caCert), ) - writeFile(t, paths.Join(confDir, "ca.pem"), caCert.CombinedPEM(), 0644) - writeFile(t, paths.Join(confDir, "server-cert.pem"), serverCert.Pem, 0644) - writeFile(t, paths.Join(confDir, "server-key.pem"), serverCert.PrivateKeyPEM(), 0644) - writeFile(t, paths.Join(confDir, "client.pem"), clientCert.CombinedPEM(), 0644) + writeFile(t, paths.Join(confDir, "ca.pem"), caCert.CombinedPEM(), 0o644) + writeFile(t, paths.Join(confDir, "server-cert.pem"), serverCert.Pem, 0o644) + writeFile(t, paths.Join(confDir, "server-key.pem"), serverCert.PrivateKeyPEM(), 0o644) + writeFile(t, paths.Join(confDir, "client.pem"), clientCert.CombinedPEM(), 0o644) // ////////////////////////////////////////////////////// // Set up MySQL config file @@ -109,7 +109,7 @@ ssl-ca=/etc/mysql/ca.pem ssl-cert=/etc/mysql/server-cert.pem ssl-key=/etc/mysql/server-key.pem` - writeFile(t, paths.Join(confDir, "my.cnf"), []byte(rawConf), 0644) + writeFile(t, paths.Join(confDir, "my.cnf"), []byte(rawConf), 0o644) // ////////////////////////////////////////////////////// // Start MySQL container diff --git a/plugins/database/mysql/mysql_test.go b/plugins/database/mysql/mysql_test.go index e8ff338cd6..5b7afaa26c 100644 --- a/plugins/database/mysql/mysql_test.go +++ b/plugins/database/mysql/mysql_test.go @@ -487,7 +487,8 @@ func TestMySQL_RotateRootCredentials(t *testing.T) { statements: []string{defaultMySQLRotateCredentialsSQL}, }, "default name": { - statements: []string{` + statements: []string{ + ` ALTER USER '{{username}}'@'%' IDENTIFIED BY '{{password}}';`, }, }, @@ -569,7 +570,8 @@ func TestMySQL_DeleteUser(t *testing.T) { revokeStmts: []string{defaultMysqlRevocationStmts}, }, "default username": { - revokeStmts: []string{` + revokeStmts: []string{ + ` REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{username}}'@'%'; DROP USER '{{username}}'@'%'`, }, @@ -609,7 +611,8 @@ func TestMySQL_DeleteUser(t *testing.T) { RoleName: "test", }, Statements: dbplugin.Statements{ - Commands: []string{` + Commands: []string{ + ` CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; GRANT SELECT ON *.* TO '{{name}}'@'%';`, }, diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go index 74df392312..fcba132493 100644 --- a/plugins/database/postgresql/postgresql.go +++ b/plugins/database/postgresql/postgresql.go @@ -263,7 +263,6 @@ func (p *PostgreSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) ( tx, err := db.BeginTx(ctx, nil) if err != nil { return dbplugin.NewUserResponse{}, fmt.Errorf("unable to start transaction: %w", err) - } defer tx.Rollback() diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go index 
40d7ee0b4b..ca800a0ddd 100644 --- a/plugins/database/postgresql/postgresql_test.go +++ b/plugins/database/postgresql/postgresql_test.go @@ -92,7 +92,8 @@ func TestPostgreSQL_NewUser(t *testing.T) { RoleName: "test", }, Statements: dbplugin.Statements{ - Commands: []string{` + Commands: []string{ + ` CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' @@ -116,7 +117,8 @@ func TestPostgreSQL_NewUser(t *testing.T) { RoleName: "test", }, Statements: dbplugin.Statements{ - Commands: []string{` + Commands: []string{ + ` CREATE ROLE "{{username}}" WITH LOGIN PASSWORD '{{password}}' @@ -140,7 +142,8 @@ func TestPostgreSQL_NewUser(t *testing.T) { RoleName: "test", }, Statements: dbplugin.Statements{ - Commands: []string{` + Commands: []string{ + ` CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' @@ -165,7 +168,8 @@ func TestPostgreSQL_NewUser(t *testing.T) { RoleName: "test", }, Statements: dbplugin.Statements{ - Commands: []string{` + Commands: []string{ + ` CREATE ROLE "{{username}}" WITH LOGIN PASSWORD '{{password}}' @@ -936,7 +940,8 @@ func TestNewUser_CustomUsername(t *testing.T) { newUserReq := dbplugin.NewUserRequest{ UsernameConfig: test.newUserData, Statements: dbplugin.Statements{ - Commands: []string{` + Commands: []string{ + ` CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' diff --git a/plugins/database/redshift/redshift.go b/plugins/database/redshift/redshift.go index 4964393b7d..e8262e7cbb 100644 --- a/plugins/database/redshift/redshift.go +++ b/plugins/database/redshift/redshift.go @@ -132,7 +132,6 @@ func (r *RedShift) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (db tx, err := db.BeginTx(ctx, nil) if err != nil { return dbplugin.NewUserResponse{}, err - } defer func() { tx.Rollback() @@ -438,7 +437,7 @@ $$;`) // again, here, we do not stop on error, as we want to remove as // many permissions as possible right now - var lastStmtError *multierror.Error //error + var lastStmtError *multierror.Error // error for _, query := range revocationStmts { if err := dbtxn.ExecuteDBQuery(ctx, db, nil, query); err != nil { lastStmtError = multierror.Append(lastStmtError, err) diff --git a/sdk/database/dbplugin/client.go b/sdk/database/dbplugin/client.go index 907e5352b7..c30c86d0c9 100644 --- a/sdk/database/dbplugin/client.go +++ b/sdk/database/dbplugin/client.go @@ -32,18 +32,17 @@ func (dc *DatabasePluginClient) Close() error { // plugin. The client is wrapped in a DatabasePluginClient object to ensure the // plugin is killed on call of Close(). func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (Database, error) { - // pluginSets is the map of plugins we can dispense. pluginSets := map[int]plugin.PluginSet{ // Version 3 used to supports both protocols. We want to keep it around // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. 
- 3: plugin.PluginSet{ + 3: { "database": new(GRPCDatabasePlugin), }, // Version 4 only supports gRPC - 4: plugin.PluginSet{ + 4: { "database": new(GRPCDatabasePlugin), }, } diff --git a/sdk/database/dbplugin/grpc_transport.go b/sdk/database/dbplugin/grpc_transport.go index bfd848021c..927f524afc 100644 --- a/sdk/database/dbplugin/grpc_transport.go +++ b/sdk/database/dbplugin/grpc_transport.go @@ -65,7 +65,6 @@ func (s *gRPCServer) RevokeUser(ctx context.Context, req *RevokeUserRequest) (*E } func (s *gRPCServer) RotateRootCredentials(ctx context.Context, req *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) { - resp, err := s.impl.RotateRootCredentials(ctx, req.Statements) if err != nil { return nil, err @@ -128,7 +127,6 @@ func (s *gRPCServer) GenerateCredentials(ctx context.Context, _ *Empty) (*Genera } func (s *gRPCServer) SetCredentials(ctx context.Context, req *SetCredentialsRequest) (*SetCredentialsResponse, error) { - username, password, err := s.impl.SetCredentials(ctx, *req.Statements, *req.StaticUserConfig) if err != nil { return nil, err @@ -222,7 +220,6 @@ func (c *gRPCClient) RevokeUser(ctx context.Context, statements Statements, user Statements: &statements, Username: username, }) - if err != nil { if c.doneCtx.Err() != nil { return ErrPluginShutdown @@ -243,7 +240,6 @@ func (c *gRPCClient) RotateRootCredentials(ctx context.Context, statements []str resp, err := c.client.RotateRootCredentials(ctx, &RotateRootCredentialsRequest{ Statements: statements, }) - if err != nil { if c.doneCtx.Err() != nil { return nil, ErrPluginShutdown @@ -330,6 +326,7 @@ func (c *gRPCClient) GenerateCredentials(ctx context.Context) (string, error) { return resp.Password, nil } + func (c *gRPCClient) SetCredentials(ctx context.Context, statements Statements, staticUser StaticUserConfig) (username, password string, err error) { ctx, cancel := context.WithCancel(ctx) quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) @@ -340,7 +337,6 @@ func (c *gRPCClient) SetCredentials(ctx context.Context, statements Statements, StaticUserConfig: &staticUser, Statements: &statements, }) - if err != nil { // Fall back to old call if not implemented grpcStatus, ok := status.FromError(err) diff --git a/sdk/database/dbplugin/plugin.go b/sdk/database/dbplugin/plugin.go index 79bc0637a8..6788e3379d 100644 --- a/sdk/database/dbplugin/plugin.go +++ b/sdk/database/dbplugin/plugin.go @@ -146,8 +146,10 @@ var handshakeConfig = plugin.HandshakeConfig{ MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb", } -var _ plugin.Plugin = &GRPCDatabasePlugin{} -var _ plugin.GRPCPlugin = &GRPCDatabasePlugin{} +var ( + _ plugin.Plugin = &GRPCDatabasePlugin{} + _ plugin.GRPCPlugin = &GRPCDatabasePlugin{} +) // GRPCDatabasePlugin is the plugin.Plugin implementation that only supports GRPC // transport diff --git a/sdk/database/dbplugin/server.go b/sdk/database/dbplugin/server.go index 00e71e128c..4949384baf 100644 --- a/sdk/database/dbplugin/server.go +++ b/sdk/database/dbplugin/server.go @@ -28,12 +28,12 @@ func ServeConfig(db Database, tlsProvider func() (*tls.Config, error)) *plugin.S // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. 
- 3: plugin.PluginSet{ + 3: { "database": &GRPCDatabasePlugin{ Impl: db, }, }, - 4: plugin.PluginSet{ + 4: { "database": &GRPCDatabasePlugin{ Impl: db, }, diff --git a/sdk/database/dbplugin/v5/conversions_test.go b/sdk/database/dbplugin/v5/conversions_test.go index ca98d4f3dd..374c0c2bed 100644 --- a/sdk/database/dbplugin/v5/conversions_test.go +++ b/sdk/database/dbplugin/v5/conversions_test.go @@ -491,6 +491,7 @@ func intPtr(i int) *int { func float64Ptr(f float64) *float64 { return &f } + func strPtr(str string) *string { return &str } diff --git a/sdk/database/dbplugin/v5/grpc_database_plugin.go b/sdk/database/dbplugin/v5/grpc_database_plugin.go index 24468f72fc..96d296ad79 100644 --- a/sdk/database/dbplugin/v5/grpc_database_plugin.go +++ b/sdk/database/dbplugin/v5/grpc_database_plugin.go @@ -25,8 +25,10 @@ type GRPCDatabasePlugin struct { plugin.NetRPCUnsupportedPlugin } -var _ plugin.Plugin = &GRPCDatabasePlugin{} -var _ plugin.GRPCPlugin = &GRPCDatabasePlugin{} +var ( + _ plugin.Plugin = &GRPCDatabasePlugin{} + _ plugin.GRPCPlugin = &GRPCDatabasePlugin{} +) func (d GRPCDatabasePlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { proto.RegisterDatabaseServer(s, gRPCServer{impl: d.Impl}) diff --git a/sdk/database/dbplugin/v5/grpc_server_test.go b/sdk/database/dbplugin/v5/grpc_server_test.go index bd04139f49..d3861c2544 100644 --- a/sdk/database/dbplugin/v5/grpc_server_test.go +++ b/sdk/database/dbplugin/v5/grpc_server_test.go @@ -17,10 +17,8 @@ import ( "google.golang.org/grpc/status" ) -var ( - // Before minValidSeconds in ptypes package - invalidExpiration = time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC) -) +// Before minValidSeconds in ptypes package +var invalidExpiration = time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC) func TestGRPCServer_Initialize(t *testing.T) { type testCase struct { diff --git a/sdk/database/dbplugin/v5/plugin_client.go b/sdk/database/dbplugin/v5/plugin_client.go index e74abf9bcd..d2e0961104 100644 --- a/sdk/database/dbplugin/v5/plugin_client.go +++ b/sdk/database/dbplugin/v5/plugin_client.go @@ -34,7 +34,7 @@ func (dc *DatabasePluginClient) Close() error { func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (Database, error) { // pluginSets is the map of plugins we can dispense. pluginSets := map[int]plugin.PluginSet{ - 5: plugin.PluginSet{ + 5: { "database": new(GRPCDatabasePlugin), }, } diff --git a/sdk/database/dbplugin/v5/plugin_server.go b/sdk/database/dbplugin/v5/plugin_server.go index d692b9b461..11d04e6450 100644 --- a/sdk/database/dbplugin/v5/plugin_server.go +++ b/sdk/database/dbplugin/v5/plugin_server.go @@ -23,7 +23,7 @@ func ServeConfig(db Database) *plugin.ServeConfig { // pluginSets is the map of plugins we can dispense. pluginSets := map[int]plugin.PluginSet{ - 5: plugin.PluginSet{ + 5: { "database": &GRPCDatabasePlugin{ Impl: db, }, diff --git a/sdk/database/helper/connutil/connutil.go b/sdk/database/helper/connutil/connutil.go index 35553d2261..1749b275a2 100644 --- a/sdk/database/helper/connutil/connutil.go +++ b/sdk/database/helper/connutil/connutil.go @@ -6,9 +6,7 @@ import ( "sync" ) -var ( - ErrNotInitialized = errors.New("connection has not been initialized") -) +var ErrNotInitialized = errors.New("connection has not been initialized") // ConnectionProducer can be used as an embedded interface in the Database // definition. 
It implements the methods dealing with individual database diff --git a/sdk/database/helper/credsutil/credsutil.go b/sdk/database/helper/credsutil/credsutil.go index 12b744fc43..d35d007bef 100644 --- a/sdk/database/helper/credsutil/credsutil.go +++ b/sdk/database/helper/credsutil/credsutil.go @@ -2,9 +2,8 @@ package credsutil import ( "context" - "time" - "fmt" + "time" "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/helper/base62" diff --git a/sdk/database/helper/dbutil/dbutil_test.go b/sdk/database/helper/dbutil/dbutil_test.go index ba4827926c..64ca9924d3 100644 --- a/sdk/database/helper/dbutil/dbutil_test.go +++ b/sdk/database/helper/dbutil/dbutil_test.go @@ -58,5 +58,4 @@ func TestStatementCompatibilityHelper(t *testing.T) { if !reflect.DeepEqual(expectedStatements3, StatementCompatibilityHelper(statements3)) { t.Fatalf("mismatch: %#v, %#v", expectedStatements3, statements3) } - } diff --git a/sdk/framework/backend.go b/sdk/framework/backend.go index 416d5d856f..0a0894fd01 100644 --- a/sdk/framework/backend.go +++ b/sdk/framework/backend.go @@ -164,7 +164,8 @@ func (b *Backend) HandleExistenceCheck(ctx context.Context, req *logical.Request fd := FieldData{ Raw: raw, - Schema: path.Fields} + Schema: path.Fields, + } err = fd.Validate() if err != nil { @@ -260,7 +261,8 @@ func (b *Backend) HandleRequest(ctx context.Context, req *logical.Request) (*log fd := FieldData{ Raw: raw, - Schema: path.Fields} + Schema: path.Fields, + } if req.Operation != logical.HelpOperation { err := fd.Validate() diff --git a/sdk/framework/backend_test.go b/sdk/framework/backend_test.go index 57f2e20fb8..9abd49d63c 100644 --- a/sdk/framework/backend_test.go +++ b/sdk/framework/backend_test.go @@ -66,7 +66,7 @@ func TestBackendHandleRequest(t *testing.T) { { Pattern: "foo/bar", Fields: map[string]*FieldSchema{ - "value": &FieldSchema{Type: TypeInt}, + "value": {Type: TypeInt}, }, Callbacks: map[logical.Operation]OperationFunc{ logical.ReadOperation: callback, @@ -75,7 +75,7 @@ func TestBackendHandleRequest(t *testing.T) { { Pattern: "foo/baz/handler", Fields: map[string]*FieldSchema{ - "amount": &FieldSchema{Type: TypeInt}, + "amount": {Type: TypeInt}, }, Operations: map[logical.Operation]OperationHandler{ logical.ReadOperation: &PathOperation{Callback: handler}, @@ -84,7 +84,7 @@ func TestBackendHandleRequest(t *testing.T) { { Pattern: "foo/both/handler", Fields: map[string]*FieldSchema{ - "amount": &FieldSchema{Type: TypeInt}, + "amount": {Type: TypeInt}, }, Callbacks: map[logical.Operation]OperationFunc{ logical.ReadOperation: callback, @@ -228,10 +228,10 @@ func TestBackendHandleRequest_badwrite(t *testing.T) { b := &Backend{ Paths: []*Path{ - &Path{ + { Pattern: "foo/bar", Fields: map[string]*FieldSchema{ - "value": &FieldSchema{Type: TypeBool}, + "value": {Type: TypeBool}, }, Callbacks: map[logical.Operation]OperationFunc{ logical.UpdateOperation: callback, @@ -249,7 +249,6 @@ func TestBackendHandleRequest_badwrite(t *testing.T) { if err == nil { t.Fatalf("should have thrown a conversion error") } - } func TestBackendHandleRequest_404(t *testing.T) { @@ -263,10 +262,10 @@ func TestBackendHandleRequest_404(t *testing.T) { b := &Backend{ Paths: []*Path{ - &Path{ + { Pattern: `foo/bar`, Fields: map[string]*FieldSchema{ - "value": &FieldSchema{Type: TypeInt}, + "value": {Type: TypeInt}, }, Callbacks: map[logical.Operation]OperationFunc{ logical.ReadOperation: callback, @@ -288,10 +287,10 @@ func TestBackendHandleRequest_404(t *testing.T) { func 
TestBackendHandleRequest_help(t *testing.T) { b := &Backend{ Paths: []*Path{ - &Path{ + { Pattern: "foo/bar", Fields: map[string]*FieldSchema{ - "value": &FieldSchema{Type: TypeInt}, + "value": {Type: TypeInt}, }, HelpSynopsis: "foo", HelpDescription: "bar", @@ -360,6 +359,7 @@ func TestBackendHandleRequest_renewAuthCallback(t *testing.T) { t.Fatalf("bad: %#v", v) } } + func TestBackendHandleRequest_renew(t *testing.T) { called := new(uint32) callback := func(context.Context, *logical.Request, *FieldData) (*logical.Response, error) { @@ -485,10 +485,10 @@ func TestBackendHandleRequest_unsupportedOperation(t *testing.T) { b := &Backend{ Paths: []*Path{ - &Path{ + { Pattern: `foo/bar`, Fields: map[string]*FieldSchema{ - "value": &FieldSchema{Type: TypeInt}, + "value": {Type: TypeInt}, }, Callbacks: map[logical.Operation]OperationFunc{ logical.ReadOperation: callback, @@ -518,10 +518,10 @@ func TestBackendHandleRequest_urlPriority(t *testing.T) { b := &Backend{ Paths: []*Path{ - &Path{ + { Pattern: `foo/(?P\d+)`, Fields: map[string]*FieldSchema{ - "value": &FieldSchema{Type: TypeInt}, + "value": {Type: TypeInt}, }, Callbacks: map[logical.Operation]OperationFunc{ logical.ReadOperation: callback, @@ -613,13 +613,13 @@ func TestBackendSecret(t *testing.T) { Match bool }{ "no match": { - []*Secret{&Secret{Type: "foo"}}, + []*Secret{{Type: "foo"}}, "bar", false, }, "match": { - []*Secret{&Secret{Type: "foo"}}, + []*Secret{{Type: "foo"}}, "foo", true, }, @@ -747,7 +747,6 @@ func TestFieldSchemaDefaultOrZero(t *testing.T) { } func TestInitializeBackend(t *testing.T) { - var inited bool backend := &Backend{InitializeFunc: func(context.Context, *logical.InitializationRequest) error { inited = true diff --git a/sdk/framework/field_data_test.go b/sdk/framework/field_data_test.go index a34c2b599a..7e3c9d1676 100644 --- a/sdk/framework/field_data_test.go +++ b/sdk/framework/field_data_test.go @@ -17,7 +17,7 @@ func TestFieldDataGet(t *testing.T) { }{ "string type, string value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeString}, + "foo": {Type: TypeString}, }, map[string]interface{}{ "foo": "bar", @@ -29,7 +29,7 @@ func TestFieldDataGet(t *testing.T) { "string type, int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeString}, + "foo": {Type: TypeString}, }, map[string]interface{}{ "foo": 42, @@ -41,7 +41,7 @@ func TestFieldDataGet(t *testing.T) { "string type, unset value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeString}, + "foo": {Type: TypeString}, }, map[string]interface{}{}, "foo", @@ -51,7 +51,7 @@ func TestFieldDataGet(t *testing.T) { "string type, unset value with default": { map[string]*FieldSchema{ - "foo": &FieldSchema{ + "foo": { Type: TypeString, Default: "bar", }, @@ -64,7 +64,7 @@ func TestFieldDataGet(t *testing.T) { "lowercase string type, lowercase string value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeLowerCaseString}, + "foo": {Type: TypeLowerCaseString}, }, map[string]interface{}{ "foo": "bar", @@ -76,7 +76,7 @@ func TestFieldDataGet(t *testing.T) { "lowercase string type, mixed-case string value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeLowerCaseString}, + "foo": {Type: TypeLowerCaseString}, }, map[string]interface{}{ "foo": "BaR", @@ -88,7 +88,7 @@ func TestFieldDataGet(t *testing.T) { "lowercase string type, int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeLowerCaseString}, + "foo": {Type: TypeLowerCaseString}, }, map[string]interface{}{ "foo": 42, @@ -100,7 +100,7 @@ 
func TestFieldDataGet(t *testing.T) { "lowercase string type, unset value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeLowerCaseString}, + "foo": {Type: TypeLowerCaseString}, }, map[string]interface{}{}, "foo", @@ -110,7 +110,7 @@ func TestFieldDataGet(t *testing.T) { "lowercase string type, unset value with lowercase default": { map[string]*FieldSchema{ - "foo": &FieldSchema{ + "foo": { Type: TypeLowerCaseString, Default: "bar", }, @@ -123,7 +123,7 @@ func TestFieldDataGet(t *testing.T) { "int type, int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeInt}, + "foo": {Type: TypeInt}, }, map[string]interface{}{ "foo": 42, @@ -135,7 +135,7 @@ func TestFieldDataGet(t *testing.T) { "bool type, bool value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeBool}, + "foo": {Type: TypeBool}, }, map[string]interface{}{ "foo": false, @@ -147,7 +147,7 @@ func TestFieldDataGet(t *testing.T) { "map type, map value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeMap}, + "foo": {Type: TypeMap}, }, map[string]interface{}{ "foo": map[string]interface{}{ @@ -163,7 +163,7 @@ func TestFieldDataGet(t *testing.T) { "duration type, string value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": "42", @@ -175,7 +175,7 @@ func TestFieldDataGet(t *testing.T) { "duration type, string duration value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": "42m", @@ -187,7 +187,7 @@ func TestFieldDataGet(t *testing.T) { "duration type, int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": 42, @@ -199,7 +199,7 @@ func TestFieldDataGet(t *testing.T) { "duration type, float value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": 42.0, @@ -211,7 +211,7 @@ func TestFieldDataGet(t *testing.T) { "duration type, nil value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": nil, @@ -223,7 +223,7 @@ func TestFieldDataGet(t *testing.T) { "duration type, 0 value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": 0, @@ -235,7 +235,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, positive string value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": "42", @@ -247,7 +247,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, positive string duration value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": "42m", @@ -259,7 +259,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, positive int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": 42, @@ -271,7 +271,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, positive float value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, 
+ "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": 42.0, @@ -283,7 +283,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, negative string value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": "-42", @@ -295,7 +295,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, negative string duration value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": "-42m", @@ -307,7 +307,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, negative int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": -42, @@ -319,7 +319,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, negative float value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": -42.0, @@ -331,7 +331,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, nil value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": nil, @@ -343,7 +343,7 @@ func TestFieldDataGet(t *testing.T) { "signed duration type, 0 value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSignedDurationSecond}, + "foo": {Type: TypeSignedDurationSecond}, }, map[string]interface{}{ "foo": 0, @@ -355,7 +355,7 @@ func TestFieldDataGet(t *testing.T) { "slice type, empty slice": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSlice}, + "foo": {Type: TypeSlice}, }, map[string]interface{}{ "foo": []interface{}{}, @@ -367,7 +367,7 @@ func TestFieldDataGet(t *testing.T) { "slice type, filled, mixed slice": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeSlice}, + "foo": {Type: TypeSlice}, }, map[string]interface{}{ "foo": []interface{}{123, "abc"}, @@ -379,7 +379,7 @@ func TestFieldDataGet(t *testing.T) { "string slice type, filled slice": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeStringSlice}, + "foo": {Type: TypeStringSlice}, }, map[string]interface{}{ "foo": []interface{}{123, "abc"}, @@ -391,7 +391,7 @@ func TestFieldDataGet(t *testing.T) { "string slice type, single value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeStringSlice}, + "foo": {Type: TypeStringSlice}, }, map[string]interface{}{ "foo": "abc", @@ -403,7 +403,7 @@ func TestFieldDataGet(t *testing.T) { "string slice type, empty string": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeStringSlice}, + "foo": {Type: TypeStringSlice}, }, map[string]interface{}{ "foo": "", @@ -415,7 +415,7 @@ func TestFieldDataGet(t *testing.T) { "comma string slice type, empty string": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaStringSlice}, + "foo": {Type: TypeCommaStringSlice}, }, map[string]interface{}{ "foo": "", @@ -427,7 +427,7 @@ func TestFieldDataGet(t *testing.T) { "comma string slice type, comma string with one value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaStringSlice}, + "foo": {Type: TypeCommaStringSlice}, }, map[string]interface{}{ "foo": "value1", @@ -439,7 +439,7 @@ func TestFieldDataGet(t *testing.T) { "comma string slice type, comma string with multi value": { 
map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaStringSlice}, + "foo": {Type: TypeCommaStringSlice}, }, map[string]interface{}{ "foo": "value1,value2,value3", @@ -451,7 +451,7 @@ func TestFieldDataGet(t *testing.T) { "comma string slice type, nil string slice value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaStringSlice}, + "foo": {Type: TypeCommaStringSlice}, }, map[string]interface{}{ "foo": "", @@ -463,7 +463,7 @@ func TestFieldDataGet(t *testing.T) { "comma string slice type, string slice with one value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaStringSlice}, + "foo": {Type: TypeCommaStringSlice}, }, map[string]interface{}{ "foo": []interface{}{"value1"}, @@ -475,7 +475,7 @@ func TestFieldDataGet(t *testing.T) { "comma string slice type, string slice with multi value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaStringSlice}, + "foo": {Type: TypeCommaStringSlice}, }, map[string]interface{}{ "foo": []interface{}{"value1", "value2", "value3"}, @@ -487,7 +487,7 @@ func TestFieldDataGet(t *testing.T) { "comma string slice type, empty string slice value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaStringSlice}, + "foo": {Type: TypeCommaStringSlice}, }, map[string]interface{}{ "foo": []interface{}{}, @@ -499,7 +499,7 @@ func TestFieldDataGet(t *testing.T) { "comma int slice type, comma int with one value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaIntSlice}, + "foo": {Type: TypeCommaIntSlice}, }, map[string]interface{}{ "foo": 1, @@ -511,7 +511,7 @@ func TestFieldDataGet(t *testing.T) { "comma int slice type, comma int with multi value slice": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaIntSlice}, + "foo": {Type: TypeCommaIntSlice}, }, map[string]interface{}{ "foo": []int{1, 2, 3}, @@ -523,7 +523,7 @@ func TestFieldDataGet(t *testing.T) { "comma int slice type, comma int with multi value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaIntSlice}, + "foo": {Type: TypeCommaIntSlice}, }, map[string]interface{}{ "foo": "1,2,3", @@ -535,7 +535,7 @@ func TestFieldDataGet(t *testing.T) { "comma int slice type, nil int slice value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaIntSlice}, + "foo": {Type: TypeCommaIntSlice}, }, map[string]interface{}{ "foo": "", @@ -547,7 +547,7 @@ func TestFieldDataGet(t *testing.T) { "comma int slice type, int slice with one value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaIntSlice}, + "foo": {Type: TypeCommaIntSlice}, }, map[string]interface{}{ "foo": []interface{}{"1"}, @@ -559,7 +559,7 @@ func TestFieldDataGet(t *testing.T) { "comma int slice type, int slice with multi value strings": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaIntSlice}, + "foo": {Type: TypeCommaIntSlice}, }, map[string]interface{}{ "foo": []interface{}{"1", "2", "3"}, @@ -571,7 +571,7 @@ func TestFieldDataGet(t *testing.T) { "comma int slice type, int slice with multi value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaIntSlice}, + "foo": {Type: TypeCommaIntSlice}, }, map[string]interface{}{ "foo": []interface{}{1, 2, 3}, @@ -583,7 +583,7 @@ func TestFieldDataGet(t *testing.T) { "comma int slice type, empty int slice value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeCommaIntSlice}, + "foo": {Type: TypeCommaIntSlice}, }, map[string]interface{}{ "foo": []interface{}{}, @@ -594,7 +594,7 @@ func TestFieldDataGet(t *testing.T) { }, "name string 
type, valid string": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeNameString}, + "foo": {Type: TypeNameString}, }, map[string]interface{}{ "foo": "bar", @@ -606,7 +606,7 @@ func TestFieldDataGet(t *testing.T) { "name string type, valid value with special characters": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeNameString}, + "foo": {Type: TypeNameString}, }, map[string]interface{}{ "foo": "bar.baz-bay123", @@ -618,7 +618,7 @@ func TestFieldDataGet(t *testing.T) { "keypair type, valid value map type": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeKVPairs}, + "foo": {Type: TypeKVPairs}, }, map[string]interface{}{ "foo": map[string]interface{}{ @@ -638,7 +638,7 @@ func TestFieldDataGet(t *testing.T) { "keypair type, list of equal sign delim key pairs type": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeKVPairs}, + "foo": {Type: TypeKVPairs}, }, map[string]interface{}{ "foo": []interface{}{"key1=value1", "key2=value2", "key3=1"}, @@ -654,7 +654,7 @@ func TestFieldDataGet(t *testing.T) { "keypair type, single equal sign delim value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeKVPairs}, + "foo": {Type: TypeKVPairs}, }, map[string]interface{}{ "foo": "key1=value1", @@ -1005,7 +1005,7 @@ func TestFieldDataGet_Error(t *testing.T) { }{ "name string type, invalid value with invalid characters": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeNameString}, + "foo": {Type: TypeNameString}, }, map[string]interface{}{ "foo": "bar baz", @@ -1014,7 +1014,7 @@ func TestFieldDataGet_Error(t *testing.T) { }, "name string type, invalid value with special characters at beginning": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeNameString}, + "foo": {Type: TypeNameString}, }, map[string]interface{}{ "foo": ".barbaz", @@ -1023,7 +1023,7 @@ func TestFieldDataGet_Error(t *testing.T) { }, "name string type, invalid value with special characters at end": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeNameString}, + "foo": {Type: TypeNameString}, }, map[string]interface{}{ "foo": "barbaz-", @@ -1032,7 +1032,7 @@ func TestFieldDataGet_Error(t *testing.T) { }, "name string type, empty string": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeNameString}, + "foo": {Type: TypeNameString}, }, map[string]interface{}{ "foo": "", @@ -1041,7 +1041,7 @@ func TestFieldDataGet_Error(t *testing.T) { }, "keypair type, csv version empty key name": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeKVPairs}, + "foo": {Type: TypeKVPairs}, }, map[string]interface{}{ "foo": []interface{}{"=value1", "key2=value2", "key3=1"}, @@ -1050,7 +1050,7 @@ func TestFieldDataGet_Error(t *testing.T) { }, "duration type, negative string value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": "-42", @@ -1059,7 +1059,7 @@ func TestFieldDataGet_Error(t *testing.T) { }, "duration type, negative string duration value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": "-42m", @@ -1068,7 +1068,7 @@ func TestFieldDataGet_Error(t *testing.T) { }, "duration type, negative int value": { map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": -42, @@ -1077,7 +1077,7 @@ func TestFieldDataGet_Error(t *testing.T) { }, "duration type, negative float value": { 
map[string]*FieldSchema{ - "foo": &FieldSchema{Type: TypeDurationSecond}, + "foo": {Type: TypeDurationSecond}, }, map[string]interface{}{ "foo": -42.0, diff --git a/sdk/framework/filter.go b/sdk/framework/filter.go index e042b53908..faaccba2a8 100644 --- a/sdk/framework/filter.go +++ b/sdk/framework/filter.go @@ -2,6 +2,7 @@ package framework import ( "context" + "github.com/hashicorp/vault/sdk/logical" "github.com/ryanuber/go-glob" ) @@ -31,4 +32,4 @@ func GlobListFilter(fieldName string, callback OperationFunc) OperationFunc { } return resp, nil } -} \ No newline at end of file +} diff --git a/sdk/framework/openapi.go b/sdk/framework/openapi.go index 7791ba8fb2..02667cda4d 100644 --- a/sdk/framework/openapi.go +++ b/sdk/framework/openapi.go @@ -39,12 +39,10 @@ func NewOASDocument() *OASDocument { // If a document has been decoded from JSON or received from a plugin, it will be as a map[string]interface{} // and needs special handling beyond the default mapstructure decoding. func NewOASDocumentFromMap(input map[string]interface{}) (*OASDocument, error) { - // The Responses map uses integer keys (the response code), but once translated into JSON // (e.g. during the plugin transport) these become strings. mapstructure will not coerce these back // to integers without a custom decode hook. decodeHook := func(src reflect.Type, tgt reflect.Type, inputRaw interface{}) (interface{}, error) { - // Only alter data if: // 1. going from string to int // 2. string represent an int in status code range (100-599) @@ -165,7 +163,7 @@ type OASSchema struct { Default interface{} `json:"default,omitempty"` Example interface{} `json:"example,omitempty"` Deprecated bool `json:"deprecated,omitempty"` - //DisplayName string `json:"x-vault-displayName,omitempty" mapstructure:"x-vault-displayName,omitempty"` + // DisplayName string `json:"x-vault-displayName,omitempty" mapstructure:"x-vault-displayName,omitempty"` DisplayValue interface{} `json:"x-vault-displayValue,omitempty" mapstructure:"x-vault-displayValue,omitempty"` DisplaySensitive bool `json:"x-vault-displaySensitive,omitempty" mapstructure:"x-vault-displaySensitive,omitempty"` DisplayGroup string `json:"x-vault-displayGroup,omitempty" mapstructure:"x-vault-displayGroup,omitempty"` @@ -192,15 +190,17 @@ var OASStdRespNoContent = &OASResponse{ // Both "(leases/)?renew" and "(/(?P.+))?" formats are detected var optRe = regexp.MustCompile(`(?U)\([^(]*\)\?|\(/\(\?P<[^(]*\)\)\?`) -var reqdRe = regexp.MustCompile(`\(?\?P<(\w+)>[^)]*\)?`) // Capture required parameters, e.g. "(?Pregex)" -var altRe = regexp.MustCompile(`\((.*)\|(.*)\)`) // Capture alternation elements, e.g. "(raw/?$|raw/(?P.+))" -var pathFieldsRe = regexp.MustCompile(`{(\w+)}`) // Capture OpenAPI-style named parameters, e.g. "lookup/{urltoken}", -var cleanCharsRe = regexp.MustCompile("[()^$?]") // Set of regex characters that will be stripped during cleaning -var cleanSuffixRe = regexp.MustCompile(`/\?\$?$`) // Path suffix patterns that will be stripped during cleaning -var wsRe = regexp.MustCompile(`\s+`) // Match whitespace, to be compressed during cleaning -var altFieldsGroupRe = regexp.MustCompile(`\(\?P<\w+>\w+(\|\w+)+\)`) // Match named groups that limit options, e.g. "(?a|b|c)" -var altFieldsRe = regexp.MustCompile(`\w+(\|\w+)+`) // Match an options set, e.g. "a|b|c" -var nonWordRe = regexp.MustCompile(`[^\w]+`) // Match a sequence of non-word characters +var ( + reqdRe = regexp.MustCompile(`\(?\?P<(\w+)>[^)]*\)?`) // Capture required parameters, e.g. 
"(?Pregex)" + altRe = regexp.MustCompile(`\((.*)\|(.*)\)`) // Capture alternation elements, e.g. "(raw/?$|raw/(?P.+))" + pathFieldsRe = regexp.MustCompile(`{(\w+)}`) // Capture OpenAPI-style named parameters, e.g. "lookup/{urltoken}", + cleanCharsRe = regexp.MustCompile("[()^$?]") // Set of regex characters that will be stripped during cleaning + cleanSuffixRe = regexp.MustCompile(`/\?\$?$`) // Path suffix patterns that will be stripped during cleaning + wsRe = regexp.MustCompile(`\s+`) // Match whitespace, to be compressed during cleaning + altFieldsGroupRe = regexp.MustCompile(`\(\?P<\w+>\w+(\|\w+)+\)`) // Match named groups that limit options, e.g. "(?a|b|c)" + altFieldsRe = regexp.MustCompile(`\w+(\|\w+)+`) // Match an options set, e.g. "a|b|c" + nonWordRe = regexp.MustCompile(`[^\w]+`) // Match a sequence of non-word characters +) // documentPaths parses all paths in a framework.Backend into OpenAPI paths. func documentPaths(backend *Backend, doc *OASDocument) error { diff --git a/sdk/framework/openapi_test.go b/sdk/framework/openapi_test.go index 69e91b7ec3..3efbecc612 100644 --- a/sdk/framework/openapi_test.go +++ b/sdk/framework/openapi_test.go @@ -145,7 +145,6 @@ func TestOpenAPI_Regex(t *testing.T) { t.Fatalf("Clean Regex error (%s). Expected %s, got %s", test.input, test.output, result) } } - }) } @@ -599,7 +598,7 @@ func testPath(t *testing.T, path *Path, sp *logical.Paths, expectedJSON string) } if diff := deep.Equal(actual, expected); diff != nil { - //fmt.Println(string(docJSON)) // uncomment to debug generated JSON (very helpful when fixing tests) + // fmt.Println(string(docJSON)) // uncomment to debug generated JSON (very helpful when fixing tests) t.Fatal(diff) } } @@ -630,7 +629,6 @@ func expected(name string) string { func mustJSONMarshal(t *testing.T, data interface{}) []byte { j, err := json.MarshalIndent(data, "", " ") - if err != nil { t.Fatal(err) } diff --git a/sdk/framework/path.go b/sdk/framework/path.go index 1339ca6af3..4dc8ca3033 100644 --- a/sdk/framework/path.go +++ b/sdk/framework/path.go @@ -274,7 +274,7 @@ func (p *Path) helpCallback(b *Backend) OperationFunc { // Alphabetize the fields fieldKeys := make([]string, 0, len(p.Fields)) - for k, _ := range p.Fields { + for k := range p.Fields { fieldKeys = append(fieldKeys, k) } sort.Strings(fieldKeys) diff --git a/sdk/framework/path_map.go b/sdk/framework/path_map.go index 8e1b91864b..0cba8ea2fb 100644 --- a/sdk/framework/path_map.go +++ b/sdk/framework/path_map.go @@ -37,7 +37,7 @@ func (p *PathMap) init() { if p.Schema == nil { p.Schema = map[string]*FieldSchema{ - "value": &FieldSchema{ + "value": { Type: TypeString, Description: fmt.Sprintf("Value for %s mapping", p.Name), }, @@ -207,7 +207,7 @@ func (p *PathMap) Paths() []*Path { } return []*Path{ - &Path{ + { Pattern: fmt.Sprintf("%s/%s/?$", p.Prefix, p.Name), Callbacks: map[logical.Operation]OperationFunc{ @@ -218,7 +218,7 @@ func (p *PathMap) Paths() []*Path { HelpSynopsis: fmt.Sprintf("Read mappings for %s", p.Name), }, - &Path{ + { Pattern: fmt.Sprintf(`%s/%s/(?P[-\w]+)`, p.Prefix, p.Name), Fields: schema, diff --git a/sdk/framework/path_struct_test.go b/sdk/framework/path_struct_test.go index 4a9aa6b43f..9e81cc2e30 100644 --- a/sdk/framework/path_struct_test.go +++ b/sdk/framework/path_struct_test.go @@ -12,7 +12,7 @@ func TestPathStruct(t *testing.T) { Name: "foo", Path: "bar", Schema: map[string]*FieldSchema{ - "value": &FieldSchema{Type: TypeString}, + "value": {Type: TypeString}, }, Read: true, } diff --git a/sdk/framework/path_test.go 
b/sdk/framework/path_test.go index c3ccb2a561..ca359d1f57 100644 --- a/sdk/framework/path_test.go +++ b/sdk/framework/path_test.go @@ -94,5 +94,4 @@ func TestPath_Regex(t *testing.T) { t.Fatal(diff) } } - } diff --git a/sdk/framework/policy_map.go b/sdk/framework/policy_map.go index 7657b4b0a9..7befb39954 100644 --- a/sdk/framework/policy_map.go +++ b/sdk/framework/policy_map.go @@ -59,7 +59,7 @@ func (p *PolicyMap) Policies(ctx context.Context, s logical.Storage, names ...st } list := make([]string, 0, len(set)) - for k, _ := range set { + for k := range set { list = append(list, k) } sort.Strings(list) diff --git a/sdk/helper/awsutil/generate_credentials.go b/sdk/helper/awsutil/generate_credentials.go index 8cb8ab154c..1ff60d696b 100644 --- a/sdk/helper/awsutil/generate_credentials.go +++ b/sdk/helper/awsutil/generate_credentials.go @@ -62,7 +62,8 @@ func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials, AccessKeyID: c.AccessKey, SecretAccessKey: c.SecretKey, SessionToken: c.SessionToken, - }}) + }, + }) c.log(hclog.Debug, "added static credential provider", "AccessKey", c.AccessKey) case c.AccessKey == "" && c.SecretKey == "": @@ -92,7 +93,7 @@ func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials, c.log(hclog.Warn, "error assuming role", "roleARN", roleARN, "tokenPath", tokenPath, "sessionName", sessionName, "err", err) } - //Add the web identity role credential provider + // Add the web identity role credential provider providers = append(providers, webIdentityProvider) } diff --git a/sdk/helper/awsutil/region_test.go b/sdk/helper/awsutil/region_test.go index 9928d3982a..1a80c4a157 100644 --- a/sdk/helper/awsutil/region_test.go +++ b/sdk/helper/awsutil/region_test.go @@ -186,7 +186,6 @@ func setEnvRegion(t *testing.T, region string) (cleanup func()) { } func setConfigFileRegion(t *testing.T, region string) (cleanup func()) { - var cleanupFuncs []func() cleanup = func() { @@ -220,13 +219,13 @@ func setConfigFileRegion(t *testing.T, region string) (cleanup func()) { }) } else { cleanupFuncs = append(cleanupFuncs, func() { - if err := ioutil.WriteFile(pathToConfig, preExistingConfig, 0644); err != nil { + if err := ioutil.WriteFile(pathToConfig, preExistingConfig, 0o644); err != nil { t.Fatal(err) } }) } fileBody := fmt.Sprintf(testConfigFile, region) - if err := ioutil.WriteFile(pathToConfig, []byte(fileBody), 0644); err != nil { + if err := ioutil.WriteFile(pathToConfig, []byte(fileBody), 0o644); err != nil { t.Fatal(err) } diff --git a/sdk/helper/base62/base62.go b/sdk/helper/base62/base62.go index 57a76d4422..36c6bc9a2e 100644 --- a/sdk/helper/base62/base62.go +++ b/sdk/helper/base62/base62.go @@ -9,8 +9,10 @@ import ( uuid "github.com/hashicorp/go-uuid" ) -const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" -const csLen = byte(len(charset)) +const ( + charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + csLen = byte(len(charset)) +) // Random generates a random string using base-62 characters. // Resulting entropy is ~5.95 bits/character. 
diff --git a/sdk/helper/certutil/helpers.go b/sdk/helper/certutil/helpers.go index 12198798e8..c23cca994c 100644 --- a/sdk/helper/certutil/helpers.go +++ b/sdk/helper/certutil/helpers.go @@ -643,7 +643,7 @@ func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertB !bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId) { result.CAChain = []*CertBlock{ - &CertBlock{ + { Certificate: data.SigningBundle.Certificate, Bytes: data.SigningBundle.CertificateBytes, }, diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go index 1c7c190777..8a1a1d5fa9 100644 --- a/sdk/helper/certutil/types.go +++ b/sdk/helper/certutil/types.go @@ -51,7 +51,7 @@ type Secret struct { // names rather than official names, to eliminate confusion type PrivateKeyType string -//Well-known PrivateKeyTypes +// Well-known PrivateKeyTypes const ( UnknownPrivateKey PrivateKeyType = "" RSAPrivateKey PrivateKeyType = "rsa" @@ -63,24 +63,24 @@ const ( // client use, or both, which affects which values are set type TLSUsage int -//Well-known TLSUsage types +// Well-known TLSUsage types const ( TLSUnknown TLSUsage = 0 TLSServer TLSUsage = 1 << iota TLSClient ) -//BlockType indicates the serialization format of the key +// BlockType indicates the serialization format of the key type BlockType string -//Well-known formats +// Well-known formats const ( PKCS1Block BlockType = "RSA PRIVATE KEY" PKCS8Block BlockType = "PRIVATE KEY" ECBlock BlockType = "EC PRIVATE KEY" ) -//ParsedPrivateKeyContainer allows common key setting for certs and CSRs +// ParsedPrivateKeyContainer allows common key setting for certs and CSRs type ParsedPrivateKeyContainer interface { SetParsedPrivateKey(crypto.Signer, PrivateKeyType, []byte) } @@ -283,7 +283,7 @@ func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { block.Bytes = p.PrivateKeyBytes result.PrivateKeyType = p.PrivateKeyType - //Handle bundle not parsed by us + // Handle bundle not parsed by us if block.Type == "" { switch p.PrivateKeyType { case ECPrivateKey: diff --git a/sdk/helper/compressutil/compress_test.go b/sdk/helper/compressutil/compress_test.go index 6de3181fbc..050c7ad8a0 100644 --- a/sdk/helper/compressutil/compress_test.go +++ b/sdk/helper/compressutil/compress_test.go @@ -14,31 +14,38 @@ func TestCompressUtil_CompressDecompress(t *testing.T) { compressionConfig CompressionConfig canary byte }{ - {"GZIP default implicit", + { + "GZIP default implicit", CompressionConfig{Type: CompressionTypeGzip}, CompressionCanaryGzip, }, - {"GZIP default explicit", + { + "GZIP default explicit", CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.DefaultCompression}, CompressionCanaryGzip, }, - {"GZIP best speed", + { + "GZIP best speed", CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.BestSpeed}, CompressionCanaryGzip, }, - {"GZIP best compression", + { + "GZIP best compression", CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.BestCompression}, CompressionCanaryGzip, }, - {"Snappy", + { + "Snappy", CompressionConfig{Type: CompressionTypeSnappy}, CompressionCanarySnappy, }, - {"LZ4", + { + "LZ4", CompressionConfig{Type: CompressionTypeLZ4}, CompressionCanaryLZ4, }, - {"LZW", + { + "LZW", CompressionConfig{Type: CompressionTypeLZW}, CompressionCanaryLZW, }, diff --git a/sdk/helper/consts/replication.go b/sdk/helper/consts/replication.go index 755ff66f80..a385e40768 100644 --- a/sdk/helper/consts/replication.go +++ 
b/sdk/helper/consts/replication.go @@ -1,9 +1,9 @@ package consts const ( - //N.B. This needs to be excluded from replication despite the name; it's - //merely saying that this is cluster information for the replicated - //cluster. + // N.B. This needs to be excluded from replication despite the name; it's + // merely saying that this is cluster information for the replicated + // cluster. CoreReplicatedClusterPrefix = "core/cluster/replicated/" CoreReplicatedClusterPrefixDR = "core/cluster/replicated-dr/" @@ -45,7 +45,6 @@ const ( // We verify no change to the above values are made func init() { - if OldReplicationBootstrapping != 3 { panic("Replication Constants have changed") } diff --git a/sdk/helper/dbtxn/dbtxn.go b/sdk/helper/dbtxn/dbtxn.go index 3337bd97b2..fab9e942d7 100644 --- a/sdk/helper/dbtxn/dbtxn.go +++ b/sdk/helper/dbtxn/dbtxn.go @@ -13,7 +13,6 @@ import ( // - config: Optional, may be nil // - query: Required func ExecuteDBQuery(ctx context.Context, db *sql.DB, params map[string]string, query string) error { - parsedQuery := parseQuery(params, query) stmt, err := db.PrepareContext(ctx, parsedQuery) @@ -31,7 +30,6 @@ func ExecuteDBQuery(ctx context.Context, db *sql.DB, params map[string]string, q // - config: Optional, may be nil // - query: Required func ExecuteTxQuery(ctx context.Context, tx *sql.Tx, params map[string]string, query string) error { - parsedQuery := parseQuery(params, query) stmt, err := tx.PrepareContext(ctx, parsedQuery) @@ -51,7 +49,6 @@ func execute(ctx context.Context, stmt *sql.Stmt) error { } func parseQuery(m map[string]string, tpl string) string { - if m == nil || len(m) <= 0 { return tpl } diff --git a/sdk/helper/identitytpl/templating.go b/sdk/helper/identitytpl/templating.go index 3b742dc5bd..85166bf4f2 100644 --- a/sdk/helper/identitytpl/templating.go +++ b/sdk/helper/identitytpl/templating.go @@ -164,7 +164,6 @@ func PopulateString(p PopulateStringInput) (bool, string, error) { } func performTemplating(input string, p *PopulateStringInput) (string, error) { - performAliasTemplating := func(trimmed string, alias *logical.Alias) (string, error) { switch { case trimmed == "id": diff --git a/sdk/helper/identitytpl/templating_test.go b/sdk/helper/identitytpl/templating_test.go index 6d188f096e..41362da9e4 100644 --- a/sdk/helper/identitytpl/templating_test.go +++ b/sdk/helper/identitytpl/templating_test.go @@ -16,7 +16,7 @@ import ( var testNow = time.Now().Add(100 * time.Hour) func TestPopulate_Basic(t *testing.T) { - var tests = []struct { + tests := []struct { mode int name string input string @@ -421,7 +421,7 @@ func TestPopulate_CurrentTime(t *testing.T) { } func TestPopulate_FullObject(t *testing.T) { - var testEntity = &logical.Entity{ + testEntity := &logical.Entity{ ID: "abc-123", Name: "Entity Name", Metadata: map[string]string{ @@ -440,7 +440,7 @@ func TestPopulate_FullObject(t *testing.T) { }, } - var testGroups = []*logical.Group{ + testGroups := []*logical.Group{ {ID: "a08b0c02", Name: "g1"}, {ID: "239bef91", Name: "g2"}, } diff --git a/sdk/helper/jsonutil/json_test.go b/sdk/helper/jsonutil/json_test.go index 739d5e734a..dd33f9bf17 100644 --- a/sdk/helper/jsonutil/json_test.go +++ b/sdk/helper/jsonutil/json_test.go @@ -40,7 +40,7 @@ func TestJSONUtil_CompressDecompressJSON(t *testing.T) { if !reflect.DeepEqual(expected, actual) { t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) } - for key, _ := range actual { + for key := range actual { delete(actual, key) } diff --git a/sdk/helper/kdf/kdf_test.go 
b/sdk/helper/kdf/kdf_test.go index 120c90331e..2148257f35 100644 --- a/sdk/helper/kdf/kdf_test.go +++ b/sdk/helper/kdf/kdf_test.go @@ -19,9 +19,11 @@ func TestCounterMode(t *testing.T) { // inp = "\x00\x00\x00\x00"+context+"\x00\x00\x01\x00" // digest = hmac.HMAC(key, inp, hash).digest() // print [ord(x) for x in digest] - expect256 := []byte{219, 25, 238, 6, 185, 236, 180, 64, 248, 152, 251, + expect256 := []byte{ + 219, 25, 238, 6, 185, 236, 180, 64, 248, 152, 251, 153, 79, 5, 141, 222, 66, 200, 66, 143, 40, 3, 101, 221, 206, 163, 102, - 80, 88, 234, 87, 157} + 80, 88, 234, 87, 157, + } for _, l := range []uint32{128, 256, 384, 1024} { out, err := CounterMode(prf, prfLen, key, context, l) @@ -41,7 +43,6 @@ func TestCounterMode(t *testing.T) { t.Fatalf("mis-match") } } - } func TestHMACSHA256PRF(t *testing.T) { @@ -63,9 +64,11 @@ func TestHMACSHA256PRF(t *testing.T) { // key = "".join([chr(x) for x in range(1, 17)]) // hm = hmac.HMAC(key, msg, hash) // print [ord(x) for x in hm.digest()] - expect := []byte{9, 50, 146, 8, 188, 130, 150, 107, 205, 147, 82, 170, + expect := []byte{ + 9, 50, 146, 8, 188, 130, 150, 107, 205, 147, 82, 170, 253, 183, 26, 38, 167, 194, 220, 111, 56, 118, 219, 209, 31, 52, 137, - 90, 246, 133, 191, 124} + 90, 246, 133, 191, 124, + } if !bytes.Equal(expect, out) { t.Fatalf("mis-matched output") } diff --git a/sdk/helper/keysutil/encrypted_key_storage_test.go b/sdk/helper/keysutil/encrypted_key_storage_test.go index 4db7fb5e43..0dc1838426 100644 --- a/sdk/helper/keysutil/encrypted_key_storage_test.go +++ b/sdk/helper/keysutil/encrypted_key_storage_test.go @@ -234,7 +234,6 @@ func TestEncryptedKeysStorage_CRUD(t *testing.T) { if data != nil { t.Fatal("data should be nil") } - } func BenchmarkEncrytedKeyStorage_List(b *testing.B) { diff --git a/sdk/helper/keysutil/lock_manager.go b/sdk/helper/keysutil/lock_manager.go index 3a796c58f5..039b05ad05 100644 --- a/sdk/helper/keysutil/lock_manager.go +++ b/sdk/helper/keysutil/lock_manager.go @@ -22,9 +22,7 @@ const ( currentConvergentVersion = 3 ) -var ( - errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation") -) +var errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation") // PolicyRequest holds values used when requesting a policy. Most values are // only used during an upsert. 
diff --git a/sdk/helper/keysutil/policy.go b/sdk/helper/keysutil/policy.go index f49dbe245c..ba1b437013 100644 --- a/sdk/helper/keysutil/policy.go +++ b/sdk/helper/keysutil/policy.go @@ -312,7 +312,7 @@ type Policy struct { deleted uint32 Name string `json:"name"` - Key []byte `json:"key,omitempty"` //DEPRECATED + Key []byte `json:"key,omitempty"` // DEPRECATED Keys keyEntryMap `json:"keys"` // Derived keys MUST provide a context and the master underlying key is diff --git a/sdk/helper/keysutil/policy_test.go b/sdk/helper/keysutil/policy_test.go index 40af77d8e6..c8b3f428b0 100644 --- a/sdk/helper/keysutil/policy_test.go +++ b/sdk/helper/keysutil/policy_test.go @@ -133,7 +133,7 @@ func testArchivingUpgradeCommon(t *testing.T, lm *LockManager) { } // Store the initial key in the archive - keysArchive := []KeyEntry{KeyEntry{}, p.Keys["1"]} + keysArchive := []KeyEntry{{}, p.Keys["1"]} checkKeys(t, ctx, p, storage, keysArchive, "initial", 1, 1, 1) for i := 2; i <= 10; i++ { @@ -293,7 +293,7 @@ func testArchivingCommon(t *testing.T, lm *LockManager) { } // Store the initial key in the archive - keysArchive := []KeyEntry{KeyEntry{}, p.Keys["1"]} + keysArchive := []KeyEntry{{}, p.Keys["1"]} checkKeys(t, ctx, p, storage, keysArchive, "initial", 1, 1, 1) for i := 2; i <= 10; i++ { @@ -444,7 +444,7 @@ func Test_StorageErrorSafety(t *testing.T) { } // Store the initial key in the archive - keysArchive := []KeyEntry{KeyEntry{}, p.Keys["1"]} + keysArchive := []KeyEntry{{}, p.Keys["1"]} checkKeys(t, ctx, p, storage, keysArchive, "initial", 1, 1, 1) // We use checkKeys here just for sanity; it doesn't really handle cases of diff --git a/sdk/helper/ldaputil/client_test.go b/sdk/helper/ldaputil/client_test.go index cff985a1c2..199f93a6be 100644 --- a/sdk/helper/ldaputil/client_test.go +++ b/sdk/helper/ldaputil/client_test.go @@ -68,9 +68,9 @@ func TestGetTLSConfigs(t *testing.T) { func TestSIDBytesToString(t *testing.T) { testcases := map[string][]byte{ - "S-1-5-21-2127521184-1604012920-1887927527-72713": []byte{0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x15, 0x00, 0x00, 0x00, 0xA0, 0x65, 0xCF, 0x7E, 0x78, 0x4B, 0x9B, 0x5F, 0xE7, 0x7C, 0x87, 0x70, 0x09, 0x1C, 0x01, 0x00}, - "S-1-1-0": []byte{0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}, - "S-1-5": []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}, + "S-1-5-21-2127521184-1604012920-1887927527-72713": {0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x15, 0x00, 0x00, 0x00, 0xA0, 0x65, 0xCF, 0x7E, 0x78, 0x4B, 0x9B, 0x5F, 0xE7, 0x7C, 0x87, 0x70, 0x09, 0x1C, 0x01, 0x00}, + "S-1-1-0": {0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}, + "S-1-5": {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}, } for answer, test := range testcases { diff --git a/sdk/helper/logging/logging.go b/sdk/helper/logging/logging.go index 27578e3fd3..a8d30674b1 100644 --- a/sdk/helper/logging/logging.go +++ b/sdk/helper/logging/logging.go @@ -51,7 +51,6 @@ func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { // ParseLogFormat parses the log format from the provided string. 
func ParseLogFormat(format string) (LogFormat, error) { - switch strings.ToLower(strings.TrimSpace(format)) { case "": return UnspecifiedFormat, nil diff --git a/sdk/helper/logging/logging_test.go b/sdk/helper/logging/logging_test.go index 7c69c9a2f5..3a860d199c 100644 --- a/sdk/helper/logging/logging_test.go +++ b/sdk/helper/logging/logging_test.go @@ -8,7 +8,6 @@ import ( ) func Test_ParseLogFormat(t *testing.T) { - type testData struct { format string expected LogFormat @@ -37,7 +36,6 @@ func Test_ParseLogFormat(t *testing.T) { } func Test_ParseEnv_VAULT_LOG_FORMAT(t *testing.T) { - oldVLF := os.Getenv("VAULT_LOG_FORMAT") defer os.Setenv("VAULT_LOG_FORMAT", oldVLF) @@ -45,7 +43,6 @@ func Test_ParseEnv_VAULT_LOG_FORMAT(t *testing.T) { } func Test_ParseEnv_LOGXI_FORMAT(t *testing.T) { - oldVLF := os.Getenv("VAULT_LOG_FORMAT") defer os.Setenv("VAULT_LOG_FORMAT", oldVLF) @@ -57,16 +54,17 @@ func Test_ParseEnv_LOGXI_FORMAT(t *testing.T) { } func testParseEnvLogFormat(t *testing.T, name string) { - env := []string{ "json", "vauLT_Json", "VAULT-JSON", "vaulTJSon", "standard", "STANDARD", - "bogus"} + "bogus", + } formats := []LogFormat{ JSONFormat, JSONFormat, JSONFormat, JSONFormat, StandardFormat, StandardFormat, - UnspecifiedFormat} + UnspecifiedFormat, + } for i, e := range env { os.Setenv(name, e) diff --git a/sdk/helper/password/password_test.go b/sdk/helper/password/password_test.go index c44526d7ab..05022c22c3 100644 --- a/sdk/helper/password/password_test.go +++ b/sdk/helper/password/password_test.go @@ -9,7 +9,7 @@ type testCase struct { } func TestRemoveiTermDelete(t *testing.T) { - var tests = []testCase{ + tests := []testCase{ {"NoDelete", "TestingStuff", "TestingStuff"}, {"SingleDelete", "Testing\x7fStuff", "Testing\x7fStuff"}, {"DeleteFirst", "\x7fTestingStuff", "\x7fTestingStuff"}, diff --git a/sdk/helper/pathmanager/pathmanager_test.go b/sdk/helper/pathmanager/pathmanager_test.go index 650c7de6ba..0021b1d1e9 100644 --- a/sdk/helper/pathmanager/pathmanager_test.go +++ b/sdk/helper/pathmanager/pathmanager_test.go @@ -118,16 +118,16 @@ func TestPathManager_HasExactPath(t *testing.T) { } tcases := []tCase{ - tCase{"path1/key1", true}, - tCase{"path2/key1", true}, - tCase{"path3/key1", true}, - tCase{"path1/key1/subkey1", true}, - tCase{"path1/key1/subkey99", false}, - tCase{"path2/key1/subkey1", true}, - tCase{"path1/key1/subkey1/subkey1", false}, - tCase{"nonexistentpath/key1", false}, - tCase{"path4/key1", false}, - tCase{"path5/key1/subkey1", false}, + {"path1/key1", true}, + {"path2/key1", true}, + {"path3/key1", true}, + {"path1/key1/subkey1", true}, + {"path1/key1/subkey99", false}, + {"path2/key1/subkey1", true}, + {"path1/key1/subkey1/subkey1", false}, + {"nonexistentpath/key1", false}, + {"path4/key1", false}, + {"path5/key1/subkey1", false}, } for _, tc := range tcases { diff --git a/sdk/helper/pluginutil/run_config_test.go b/sdk/helper/pluginutil/run_config_test.go index 993fedf2f8..239d368523 100644 --- a/sdk/helper/pluginutil/run_config_test.go +++ b/sdk/helper/pluginutil/run_config_test.go @@ -39,7 +39,7 @@ func TestMakeConfig(t *testing.T) { sha256: []byte("some_sha256"), env: []string{"initial=true"}, pluginSets: map[int]plugin.PluginSet{ - 1: plugin.PluginSet{ + 1: { "bogus": nil, }, }, @@ -65,7 +65,7 @@ func TestMakeConfig(t *testing.T) { MagicCookieValue: "magic_cookie_value", }, VersionedPlugins: map[int]plugin.PluginSet{ - 1: plugin.PluginSet{ + 1: { "bogus": nil, }, }, @@ -98,7 +98,7 @@ func TestMakeConfig(t *testing.T) { sha256: []byte("some_sha256"), env: 
[]string{"initial=true"}, pluginSets: map[int]plugin.PluginSet{ - 1: plugin.PluginSet{ + 1: { "bogus": nil, }, }, @@ -127,7 +127,7 @@ func TestMakeConfig(t *testing.T) { MagicCookieValue: "magic_cookie_value", }, VersionedPlugins: map[int]plugin.PluginSet{ - 1: plugin.PluginSet{ + 1: { "bogus": nil, }, }, @@ -162,7 +162,7 @@ func TestMakeConfig(t *testing.T) { sha256: []byte("some_sha256"), env: []string{"initial=true"}, pluginSets: map[int]plugin.PluginSet{ - 1: plugin.PluginSet{ + 1: { "bogus": nil, }, }, @@ -188,7 +188,7 @@ func TestMakeConfig(t *testing.T) { MagicCookieValue: "magic_cookie_value", }, VersionedPlugins: map[int]plugin.PluginSet{ - 1: plugin.PluginSet{ + 1: { "bogus": nil, }, }, @@ -221,7 +221,7 @@ func TestMakeConfig(t *testing.T) { sha256: []byte("some_sha256"), env: []string{"initial=true"}, pluginSets: map[int]plugin.PluginSet{ - 1: plugin.PluginSet{ + 1: { "bogus": nil, }, }, @@ -247,7 +247,7 @@ func TestMakeConfig(t *testing.T) { MagicCookieValue: "magic_cookie_value", }, VersionedPlugins: map[int]plugin.PluginSet{ - 1: plugin.PluginSet{ + 1: { "bogus": nil, }, }, diff --git a/sdk/helper/policyutil/policyutil.go b/sdk/helper/policyutil/policyutil.go index 85beaf214d..1d6cc1df39 100644 --- a/sdk/helper/policyutil/policyutil.go +++ b/sdk/helper/policyutil/policyutil.go @@ -107,10 +107,10 @@ func EquivalentPolicies(a, b []string) bool { // Now we'll build our checking slices var sortedA, sortedB []string - for keyA, _ := range mapA { + for keyA := range mapA { sortedA = append(sortedA, keyA) } - for keyB, _ := range mapB { + for keyB := range mapB { sortedB = append(sortedB, keyB) } sort.Strings(sortedA) diff --git a/sdk/helper/strutil/strutil_test.go b/sdk/helper/strutil/strutil_test.go index bc1a6b7712..8a9410bdb0 100644 --- a/sdk/helper/strutil/strutil_test.go +++ b/sdk/helper/strutil/strutil_test.go @@ -86,7 +86,6 @@ func TestStrutil_ListContainsGlob(t *testing.T) { if !StrListContainsGlob(haystack, "_test_") { t.Fatalf("Value should exist") } - } func TestStrutil_ListContains(t *testing.T) { @@ -152,7 +151,7 @@ func TestStrutil_ParseKeyValues(t *testing.T) { if !reflect.DeepEqual(expected, actual) { t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) } - for k, _ := range actual { + for k := range actual { delete(actual, k) } @@ -164,7 +163,7 @@ func TestStrutil_ParseKeyValues(t *testing.T) { if !reflect.DeepEqual(expected, actual) { t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) } - for k, _ := range actual { + for k := range actual { delete(actual, k) } @@ -173,7 +172,7 @@ func TestStrutil_ParseKeyValues(t *testing.T) { if err == nil { t.Fatalf("expected an error") } - for k, _ := range actual { + for k := range actual { delete(actual, k) } @@ -182,7 +181,7 @@ func TestStrutil_ParseKeyValues(t *testing.T) { if err == nil { t.Fatalf("expected an error") } - for k, _ := range actual { + for k := range actual { delete(actual, k) } @@ -211,7 +210,7 @@ func TestStrutil_ParseArbitraryKeyValues(t *testing.T) { if !reflect.DeepEqual(expected, actual) { t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) } - for k, _ := range actual { + for k := range actual { delete(actual, k) } @@ -224,7 +223,7 @@ func TestStrutil_ParseArbitraryKeyValues(t *testing.T) { if !reflect.DeepEqual(expected, actual) { t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) } - for k, _ := range actual { + for k := range actual { delete(actual, k) } @@ -237,7 +236,7 @@ func TestStrutil_ParseArbitraryKeyValues(t *testing.T) { if 
!reflect.DeepEqual(expected, actual) { t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) } - for k, _ := range actual { + for k := range actual { delete(actual, k) } @@ -250,7 +249,7 @@ func TestStrutil_ParseArbitraryKeyValues(t *testing.T) { if !reflect.DeepEqual(expected, actual) { t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) } - for k, _ := range actual { + for k := range actual { delete(actual, k) } } @@ -326,23 +325,23 @@ func TestGlobbedStringsMatch(t *testing.T) { } tCases := []tCase{ - tCase{"", "", true}, - tCase{"*", "*", true}, - tCase{"**", "**", true}, - tCase{"*t", "t", true}, - tCase{"*t", "test", true}, - tCase{"t*", "test", true}, - tCase{"*test", "test", true}, - tCase{"*test", "a test", true}, - tCase{"test", "a test", false}, - tCase{"*test", "tests", false}, - tCase{"test*", "test", true}, - tCase{"test*", "testsss", true}, - tCase{"test**", "testsss", false}, - tCase{"test**", "test*", true}, - tCase{"**test", "*test", true}, - tCase{"TEST", "test", false}, - tCase{"test", "test", true}, + {"", "", true}, + {"*", "*", true}, + {"**", "**", true}, + {"*t", "t", true}, + {"*t", "test", true}, + {"t*", "test", true}, + {"*test", "test", true}, + {"*test", "a test", true}, + {"test", "a test", false}, + {"*test", "tests", false}, + {"test*", "test", true}, + {"test*", "testsss", true}, + {"test**", "testsss", false}, + {"test**", "test*", true}, + {"**test", "*test", true}, + {"TEST", "test", false}, + {"test", "test", true}, } for _, tc := range tCases { @@ -424,11 +423,11 @@ func TestStrUtil_RemoveDuplicates(t *testing.T) { } tCases := []tCase{ - tCase{[]string{}, []string{}, false}, - tCase{[]string{}, []string{}, true}, - tCase{[]string{"a", "b", "a"}, []string{"a", "b"}, false}, - tCase{[]string{"A", "b", "a"}, []string{"A", "a", "b"}, false}, - tCase{[]string{"A", "b", "a"}, []string{"a", "b"}, true}, + {[]string{}, []string{}, false}, + {[]string{}, []string{}, true}, + {[]string{"a", "b", "a"}, []string{"a", "b"}, false}, + {[]string{"A", "b", "a"}, []string{"A", "a", "b"}, false}, + {[]string{"A", "b", "a"}, []string{"a", "b"}, true}, } for _, tc := range tCases { @@ -448,14 +447,14 @@ func TestStrUtil_RemoveDuplicatesStable(t *testing.T) { } tCases := []tCase{ - tCase{[]string{}, []string{}, false}, - tCase{[]string{}, []string{}, true}, - tCase{[]string{"a", "b", "a"}, []string{"a", "b"}, false}, - tCase{[]string{"A", "b", "a"}, []string{"A", "b", "a"}, false}, - tCase{[]string{"A", "b", "a"}, []string{"A", "b"}, true}, - tCase{[]string{" ", "d", "c", "d"}, []string{"d", "c"}, false}, - tCase{[]string{"Z ", " z", " z ", "y"}, []string{"Z ", "y"}, true}, - tCase{[]string{"Z ", " z", " z ", "y"}, []string{"Z ", " z", "y"}, false}, + {[]string{}, []string{}, false}, + {[]string{}, []string{}, true}, + {[]string{"a", "b", "a"}, []string{"a", "b"}, false}, + {[]string{"A", "b", "a"}, []string{"A", "b", "a"}, false}, + {[]string{"A", "b", "a"}, []string{"A", "b"}, true}, + {[]string{" ", "d", "c", "d"}, []string{"d", "c"}, false}, + {[]string{"Z ", " z", " z ", "y"}, []string{"Z ", "y"}, true}, + {[]string{"Z ", " z", " z ", "y"}, []string{"Z ", " z", "y"}, false}, } for _, tc := range tCases { @@ -475,13 +474,13 @@ func TestStrUtil_ParseStringSlice(t *testing.T) { } tCases := []tCase{ - tCase{"", "", []string{}}, - tCase{" ", ",", []string{}}, - tCase{", ", ",", []string{"", ""}}, - tCase{"a", ",", []string{"a"}}, - tCase{" a, b, c ", ",", []string{"a", "b", "c"}}, - tCase{" a; b; c ", ";", []string{"a", "b", "c"}}, - tCase{" a :: b :: c 
", "::", []string{"a", "b", "c"}}, + {"", "", []string{}}, + {" ", ",", []string{}}, + {", ", ",", []string{"", ""}}, + {"a", ",", []string{"a"}}, + {" a, b, c ", ",", []string{"a", "b", "c"}}, + {" a; b; c ", ";", []string{"a", "b", "c"}}, + {" a :: b :: c ", "::", []string{"a", "b", "c"}}, } for _, tc := range tCases { diff --git a/sdk/helper/tokenutil/tokenutil.go b/sdk/helper/tokenutil/tokenutil.go index bc4dd1d733..29f9748344 100644 --- a/sdk/helper/tokenutil/tokenutil.go +++ b/sdk/helper/tokenutil/tokenutil.go @@ -71,7 +71,7 @@ func AddTokenFieldsWithAllowList(m map[string]*framework.FieldSchema, allowed [] // TokenFields provides a set of field schemas for the parameters func TokenFields() map[string]*framework.FieldSchema { return map[string]*framework.FieldSchema{ - "token_bound_cidrs": &framework.FieldSchema{ + "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, DisplayAttrs: &framework.DisplayAttributes{ @@ -80,7 +80,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_explicit_max_ttl": &framework.FieldSchema{ + "token_explicit_max_ttl": { Type: framework.TypeDurationSecond, Description: tokenExplicitMaxTTLHelp, DisplayAttrs: &framework.DisplayAttributes{ @@ -89,7 +89,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_max_ttl": &framework.FieldSchema{ + "token_max_ttl": { Type: framework.TypeDurationSecond, Description: "The maximum lifetime of the generated token", DisplayAttrs: &framework.DisplayAttributes{ @@ -98,7 +98,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_no_default_policy": &framework.FieldSchema{ + "token_no_default_policy": { Type: framework.TypeBool, Description: "If true, the 'default' policy will not automatically be added to generated tokens", DisplayAttrs: &framework.DisplayAttributes{ @@ -107,7 +107,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_period": &framework.FieldSchema{ + "token_period": { Type: framework.TypeDurationSecond, Description: tokenPeriodHelp, DisplayAttrs: &framework.DisplayAttributes{ @@ -116,7 +116,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_policies": &framework.FieldSchema{ + "token_policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", DisplayAttrs: &framework.DisplayAttributes{ @@ -125,7 +125,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_type": &framework.FieldSchema{ + "token_type": { Type: framework.TypeString, Default: "default-service", Description: "The type of token to generate, service or batch", @@ -135,7 +135,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_ttl": &framework.FieldSchema{ + "token_ttl": { Type: framework.TypeDurationSecond, Description: "The initial ttl of the token to generate", DisplayAttrs: &framework.DisplayAttributes{ @@ -144,7 +144,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_num_uses": &framework.FieldSchema{ + "token_num_uses": { Type: framework.TypeInt, Description: "The maximum number of times a token may be used, a value of zero means unlimited", DisplayAttrs: &framework.DisplayAttributes{ diff --git a/sdk/logical/storage_test.go b/sdk/logical/storage_test.go index c8ba5fc46c..3b96b4dbef 100644 --- a/sdk/logical/storage_test.go +++ b/sdk/logical/storage_test.go @@ -24,7 
+24,6 @@ func TestScanView(t *testing.T) { err := ScanView(context.Background(), s, func(path string) { keys = append(keys, path) }) - if err != nil { t.Fatal(err) } @@ -56,7 +55,6 @@ func TestCollectKeys(t *testing.T) { s := prepKeyStorage(t) keys, err := CollectKeys(context.Background(), s) - if err != nil { t.Fatal(err) } @@ -70,7 +68,6 @@ func TestCollectKeysPrefix(t *testing.T) { s := prepKeyStorage(t) keys, err := CollectKeysWithPrefix(context.Background(), s, "foo") - if err != nil { t.Fatal(err) } diff --git a/sdk/logical/storage_view.go b/sdk/logical/storage_view.go index 682ecf714e..2cd07715c2 100644 --- a/sdk/logical/storage_view.go +++ b/sdk/logical/storage_view.go @@ -11,9 +11,7 @@ type StorageView struct { prefix string } -var ( - ErrRelativePath = errors.New("relative paths not supported") -) +var ErrRelativePath = errors.New("relative paths not supported") func NewStorageView(storage Storage, prefix string) *StorageView { return &StorageView{ diff --git a/sdk/physical/cache.go b/sdk/physical/cache.go index f489858442..52768776a6 100644 --- a/sdk/physical/cache.go +++ b/sdk/physical/cache.go @@ -69,10 +69,12 @@ type TransactionalCache struct { } // Verify Cache satisfies the correct interfaces -var _ ToggleablePurgemonster = (*Cache)(nil) -var _ ToggleablePurgemonster = (*TransactionalCache)(nil) -var _ Backend = (*Cache)(nil) -var _ Transactional = (*TransactionalCache)(nil) +var ( + _ ToggleablePurgemonster = (*Cache)(nil) + _ ToggleablePurgemonster = (*TransactionalCache)(nil) + _ Backend = (*Cache)(nil) + _ Transactional = (*TransactionalCache)(nil) +) // NewCache returns a physical cache of the given size. // If no size is provided, the default size is used. diff --git a/sdk/physical/encoding.go b/sdk/physical/encoding.go index d2f93478b0..dbde84cc6d 100644 --- a/sdk/physical/encoding.go +++ b/sdk/physical/encoding.go @@ -8,8 +8,10 @@ import ( "unicode/utf8" ) -var ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters") -var ErrNonPrintable = errors.New("key contains non-printable characters") +var ( + ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters") + ErrNonPrintable = errors.New("key contains non-printable characters") +) // StorageEncoding is used to add errors into underlying physical requests type StorageEncoding struct { @@ -24,8 +26,10 @@ type TransactionalStorageEncoding struct { } // Verify StorageEncoding satisfies the correct interfaces -var _ Backend = (*StorageEncoding)(nil) -var _ Transactional = (*TransactionalStorageEncoding)(nil) +var ( + _ Backend = (*StorageEncoding)(nil) + _ Transactional = (*TransactionalStorageEncoding)(nil) +) // NewStorageEncoding returns a wrapped physical backend and verifies the key // encoding diff --git a/sdk/physical/error.go b/sdk/physical/error.go index 8091f178bc..b547e4e428 100644 --- a/sdk/physical/error.go +++ b/sdk/physical/error.go @@ -31,8 +31,10 @@ type TransactionalErrorInjector struct { } // Verify ErrorInjector satisfies the correct interfaces -var _ Backend = (*ErrorInjector)(nil) -var _ Transactional = (*TransactionalErrorInjector)(nil) +var ( + _ Backend = (*ErrorInjector)(nil) + _ Transactional = (*TransactionalErrorInjector)(nil) +) // NewErrorInjector returns a wrapped physical backend to inject error func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector { diff --git a/sdk/physical/file/file.go b/sdk/physical/file/file.go index d08d1c2b67..320ee21caa 100644 --- a/sdk/physical/file/file.go +++ b/sdk/physical/file/file.go @@ -21,9 +21,11 @@ import ( ) 
// Verify FileBackend satisfies the correct interfaces -var _ physical.Backend = (*FileBackend)(nil) -var _ physical.Transactional = (*TransactionalFileBackend)(nil) -var _ physical.PseudoTransactional = (*FileBackend)(nil) +var ( + _ physical.Backend = (*FileBackend)(nil) + _ physical.Transactional = (*TransactionalFileBackend)(nil) + _ physical.PseudoTransactional = (*FileBackend)(nil) +) // FileBackend is a physical backend that stores data on disk // at a given file path. It can be used for durable single server @@ -234,7 +236,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er } // Make the parent tree - if err := os.MkdirAll(path, 0700); err != nil { + if err := os.MkdirAll(path, 0o700); err != nil { return err } @@ -243,7 +245,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er f, err := os.OpenFile( fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, - 0600) + 0o600) if err != nil { if f != nil { f.Close() diff --git a/sdk/physical/file/file_test.go b/sdk/physical/file/file_test.go index 90da070e25..724b8a012a 100644 --- a/sdk/physical/file/file_test.go +++ b/sdk/physical/file/file_test.go @@ -45,7 +45,7 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) { f, err := os.OpenFile( rawFullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, - 0600) + 0o600) if err != nil { t.Fatal(err) } @@ -117,7 +117,7 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) { f, err = os.OpenFile( rawFullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, - 0600) + 0o600) if err != nil { t.Fatal(err) } diff --git a/sdk/physical/inmem/cache_test.go b/sdk/physical/inmem/cache_test.go index 34028c7ec7..e6e6dabfe3 100644 --- a/sdk/physical/inmem/cache_test.go +++ b/sdk/physical/inmem/cache_test.go @@ -327,5 +327,4 @@ func TestCache_Refresh(t *testing.T) { if string(r.Value) != "baz" { t.Fatalf("expected value baz, got %s", string(r.Value)) } - } diff --git a/sdk/physical/inmem/inmem.go b/sdk/physical/inmem/inmem.go index 9739a7587a..b366eb84bf 100644 --- a/sdk/physical/inmem/inmem.go +++ b/sdk/physical/inmem/inmem.go @@ -17,12 +17,14 @@ import ( ) // Verify interfaces are satisfied -var _ physical.Backend = (*InmemBackend)(nil) -var _ physical.HABackend = (*InmemHABackend)(nil) -var _ physical.HABackend = (*TransactionalInmemHABackend)(nil) -var _ physical.Lock = (*InmemLock)(nil) -var _ physical.Transactional = (*TransactionalInmemBackend)(nil) -var _ physical.Transactional = (*TransactionalInmemHABackend)(nil) +var ( + _ physical.Backend = (*InmemBackend)(nil) + _ physical.HABackend = (*InmemHABackend)(nil) + _ physical.HABackend = (*TransactionalInmemHABackend)(nil) + _ physical.Lock = (*InmemLock)(nil) + _ physical.Transactional = (*TransactionalInmemBackend)(nil) + _ physical.Transactional = (*TransactionalInmemHABackend)(nil) +) var ( PutDisabledError = errors.New("put operations disabled in inmem backend") diff --git a/sdk/physical/latency.go b/sdk/physical/latency.go index 51bb560c2f..11b413c4d0 100644 --- a/sdk/physical/latency.go +++ b/sdk/physical/latency.go @@ -32,8 +32,10 @@ type TransactionalLatencyInjector struct { } // Verify LatencyInjector satisfies the correct interfaces -var _ Backend = (*LatencyInjector)(nil) -var _ Transactional = (*TransactionalLatencyInjector)(nil) +var ( + _ Backend = (*LatencyInjector)(nil) + _ Transactional = (*TransactionalLatencyInjector)(nil) +) // NewLatencyInjector returns a wrapped physical backend to simulate latency func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) 
*LatencyInjector { diff --git a/sdk/physical/physical_view.go b/sdk/physical/physical_view.go index d891481983..189ac93172 100644 --- a/sdk/physical/physical_view.go +++ b/sdk/physical/physical_view.go @@ -6,9 +6,7 @@ import ( "strings" ) -var ( - ErrRelativePath = errors.New("relative paths not supported") -) +var ErrRelativePath = errors.New("relative paths not supported") // View represents a prefixed view of a physical backend type View struct { diff --git a/sdk/physical/testing.go b/sdk/physical/testing.go index 0970b8694f..6e0ddfcc0e 100644 --- a/sdk/physical/testing.go +++ b/sdk/physical/testing.go @@ -458,33 +458,33 @@ func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { } txns := []*TxnEntry{ - &TxnEntry{ + { Operation: PutOperation, Entry: &Entry{ Key: "foo", Value: []byte("bar2"), }, }, - &TxnEntry{ + { Operation: DeleteOperation, Entry: &Entry{ Key: "deleteme", }, }, - &TxnEntry{ + { Operation: PutOperation, Entry: &Entry{ Key: "foo", Value: []byte("bar3"), }, }, - &TxnEntry{ + { Operation: DeleteOperation, Entry: &Entry{ Key: "deleteme2", }, }, - &TxnEntry{ + { Operation: PutOperation, Entry: &Entry{ Key: "zip", diff --git a/sdk/plugin/backend.go b/sdk/plugin/backend.go index 17932efe40..82c7287327 100644 --- a/sdk/plugin/backend.go +++ b/sdk/plugin/backend.go @@ -12,8 +12,10 @@ import ( "github.com/hashicorp/vault/sdk/plugin/pb" ) -var _ plugin.Plugin = (*GRPCBackendPlugin)(nil) -var _ plugin.GRPCPlugin = (*GRPCBackendPlugin)(nil) +var ( + _ plugin.Plugin = (*GRPCBackendPlugin)(nil) + _ plugin.GRPCPlugin = (*GRPCBackendPlugin)(nil) +) // GRPCBackendPlugin is the plugin.Plugin implementation that only supports GRPC // transport diff --git a/sdk/plugin/grpc_backend_client.go b/sdk/plugin/grpc_backend_client.go index 4bb9a2a4b3..9ea3c23f8c 100644 --- a/sdk/plugin/grpc_backend_client.go +++ b/sdk/plugin/grpc_backend_client.go @@ -17,8 +17,10 @@ import ( "github.com/hashicorp/vault/sdk/plugin/pb" ) -var ErrPluginShutdown = errors.New("plugin is shut down") -var ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode") +var ( + ErrPluginShutdown = errors.New("plugin is shut down") + ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode") +) // Validate backendGRPCPluginClient satisfies the logical.Backend interface var _ logical.Backend = &backendGRPCPluginClient{} diff --git a/sdk/plugin/grpc_system_test.go b/sdk/plugin/grpc_system_test.go index 205619463f..cf1449f059 100644 --- a/sdk/plugin/grpc_system_test.go +++ b/sdk/plugin/grpc_system_test.go @@ -154,7 +154,7 @@ func TestSystem_GRPC_entityInfo(t *testing.T) { "foo": "bar", }, Aliases: []*logical.Alias{ - &logical.Alias{ + { MountType: "logical", MountAccessor: "accessor", Name: "name", diff --git a/sdk/plugin/logger.go b/sdk/plugin/logger.go index a59a8a3da2..ecf6ed01f1 100644 --- a/sdk/plugin/logger.go +++ b/sdk/plugin/logger.go @@ -32,7 +32,6 @@ func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error { } func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error { - switch translateLevel(args.Level) { case hclog.Trace: @@ -107,7 +106,6 @@ type LoggerReply struct { } func translateLevel(logxiLevel int) hclog.Level { - switch logxiLevel { case 1000, 10: diff --git a/sdk/plugin/logger_test.go b/sdk/plugin/logger_test.go index 800710d308..99c27b15b1 100644 --- a/sdk/plugin/logger_test.go +++ b/sdk/plugin/logger_test.go @@ -138,7 +138,6 @@ func TestLogger_log(t *testing.T) { if !strings.Contains(result, 
expected) { t.Fatalf("expected log to contain %s, got %s", expected, result) } - } func TestLogger_setLevel(t *testing.T) { @@ -186,6 +185,7 @@ func (l *deprecatedLoggerClient) Info(msg string, args ...interface{}) { } l.client.Call("Plugin.Info", cArgs, &struct{}{}) } + func (l *deprecatedLoggerClient) Warn(msg string, args ...interface{}) error { var reply LoggerReply cArgs := &LoggerArgs{ @@ -202,6 +202,7 @@ func (l *deprecatedLoggerClient) Warn(msg string, args ...interface{}) error { return nil } + func (l *deprecatedLoggerClient) Error(msg string, args ...interface{}) error { var reply LoggerReply cArgs := &LoggerArgs{ @@ -242,6 +243,7 @@ func (l *deprecatedLoggerClient) IsTrace() bool { l.client.Call("Plugin.IsTrace", new(interface{}), &reply) return reply.IsTrue } + func (l *deprecatedLoggerClient) IsDebug() bool { var reply LoggerReply l.client.Call("Plugin.IsDebug", new(interface{}), &reply) diff --git a/sdk/plugin/mock/path_errors.go b/sdk/plugin/mock/path_errors.go index 16df5cc25c..05ef474a7e 100644 --- a/sdk/plugin/mock/path_errors.go +++ b/sdk/plugin/mock/path_errors.go @@ -15,22 +15,22 @@ import ( // it is used to test the invalidate func. func errorPaths(b *backend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: "errors/rpc", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathErrorRPCRead, }, }, - &framework.Path{ + { Pattern: "errors/kill", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathErrorRPCRead, }, }, - &framework.Path{ + { Pattern: "errors/type", Fields: map[string]*framework.FieldSchema{ - "err_type": &framework.FieldSchema{Type: framework.TypeInt}, + "err_type": {Type: framework.TypeInt}, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.CreateOperation: b.pathErrorRPCRead, @@ -71,5 +71,4 @@ func (b *backend) pathErrorRPCRead(ctx context.Context, req *logical.Request, da } return nil, err - } diff --git a/sdk/plugin/mock/path_internal.go b/sdk/plugin/mock/path_internal.go index 30bee271a0..26ede270fa 100644 --- a/sdk/plugin/mock/path_internal.go +++ b/sdk/plugin/mock/path_internal.go @@ -13,7 +13,7 @@ func pathInternal(b *backend) *framework.Path { return &framework.Path{ Pattern: "internal", Fields: map[string]*framework.FieldSchema{ - "value": &framework.FieldSchema{Type: framework.TypeString}, + "value": {Type: framework.TypeString}, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathInternalUpdate, diff --git a/sdk/plugin/mock/path_kv.go b/sdk/plugin/mock/path_kv.go index efafe7b2cc..1946b57624 100644 --- a/sdk/plugin/mock/path_kv.go +++ b/sdk/plugin/mock/path_kv.go @@ -12,18 +12,18 @@ import ( // version of the passthrough backend that only accepts string values. 
func kvPaths(b *backend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: "kv/?", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathKVList, }, }, - &framework.Path{ + { Pattern: "kv/" + framework.GenericNameRegex("key"), Fields: map[string]*framework.FieldSchema{ - "key": &framework.FieldSchema{Type: framework.TypeString}, - "value": &framework.FieldSchema{Type: framework.TypeString}, - "version": &framework.FieldSchema{Type: framework.TypeInt}, + "key": {Type: framework.TypeString}, + "value": {Type: framework.TypeString}, + "version": {Type: framework.TypeInt}, }, ExistenceCheck: b.pathExistenceCheck, Callbacks: map[logical.Operation]framework.OperationFunc{ diff --git a/sdk/plugin/mock/path_raw.go b/sdk/plugin/mock/path_raw.go index 41631ddac3..55cb7c9374 100644 --- a/sdk/plugin/mock/path_raw.go +++ b/sdk/plugin/mock/path_raw.go @@ -25,5 +25,4 @@ func (b *backend) pathRawRead(ctx context.Context, req *logical.Request, data *f logical.HTTPStatusCode: 200, }, }, nil - } diff --git a/sdk/plugin/mock/path_special.go b/sdk/plugin/mock/path_special.go index 626dd22215..22afa41c6d 100644 --- a/sdk/plugin/mock/path_special.go +++ b/sdk/plugin/mock/path_special.go @@ -24,5 +24,4 @@ func (b *backend) pathSpecialRead(ctx context.Context, req *logical.Request, dat "data": "foo", }, }, nil - } diff --git a/sdk/plugin/pb/translation_test.go b/sdk/plugin/pb/translation_test.go index dcdf553191..f63565b767 100644 --- a/sdk/plugin/pb/translation_test.go +++ b/sdk/plugin/pb/translation_test.go @@ -39,9 +39,9 @@ func TestTranslation_Errors(t *testing.T) { func TestTranslation_StorageEntry(t *testing.T) { tCases := []*logical.StorageEntry{ nil, - &logical.StorageEntry{Key: "key", Value: []byte("value")}, - &logical.StorageEntry{Key: "key1", Value: []byte("value1"), SealWrap: true}, - &logical.StorageEntry{Key: "key1", SealWrap: true}, + {Key: "key", Value: []byte("value")}, + {Key: "key1", Value: []byte("value1"), SealWrap: true}, + {Key: "key1", SealWrap: true}, } for _, c := range tCases { @@ -57,7 +57,7 @@ func TestTranslation_StorageEntry(t *testing.T) { func TestTranslation_Request(t *testing.T) { tCases := []*logical.Request{ nil, - &logical.Request{ + { ID: "ID", ReplicationCluster: "RID", Operation: logical.CreateOperation, @@ -76,7 +76,7 @@ func TestTranslation_Request(t *testing.T) { RemoteAddr: "localhost", }, }, - &logical.Request{ + { ID: "ID", ReplicationCluster: "RID", Operation: logical.CreateOperation, @@ -129,7 +129,7 @@ func TestTranslation_Request(t *testing.T) { Name: "name", }, GroupAliases: []*logical.Alias{ - &logical.Alias{ + { MountType: "type", MountAccessor: "accessor", Name: "name", @@ -137,7 +137,7 @@ func TestTranslation_Request(t *testing.T) { }, }, Headers: map[string][]string{ - "X-Vault-Test": []string{"test"}, + "X-Vault-Test": {"test"}, }, ClientToken: "token", ClientTokenAccessor: "accessor", @@ -176,13 +176,13 @@ func TestTranslation_Request(t *testing.T) { func TestTranslation_Response(t *testing.T) { tCases := []*logical.Response{ nil, - &logical.Response{ + { Data: map[string]interface{}{ "data": "blah", }, Warnings: []string{"warning"}, }, - &logical.Response{ + { Data: map[string]interface{}{ "string": "string", "bool": true, @@ -231,7 +231,7 @@ func TestTranslation_Response(t *testing.T) { Name: "name", }, GroupAliases: []*logical.Alias{ - &logical.Alias{ + { MountType: "type", MountAccessor: "accessor", Name: "name", diff --git a/sdk/plugin/plugin.go b/sdk/plugin/plugin.go index 
f12a36928c..f4f2d8e18f 100644 --- a/sdk/plugin/plugin.go +++ b/sdk/plugin/plugin.go @@ -76,12 +76,12 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. - 3: plugin.PluginSet{ + 3: { "backend": &GRPCBackendPlugin{ MetadataMode: isMetadataMode, }, }, - 4: plugin.PluginSet{ + 4: { "backend": &GRPCBackendPlugin{ MetadataMode: isMetadataMode, }, diff --git a/sdk/plugin/serve.go b/sdk/plugin/serve.go index b8cd3e58f0..1119a2dac6 100644 --- a/sdk/plugin/serve.go +++ b/sdk/plugin/serve.go @@ -43,13 +43,13 @@ func Serve(opts *ServeOpts) error { // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. - 3: plugin.PluginSet{ + 3: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, Logger: logger, }, }, - 4: plugin.PluginSet{ + 4: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, Logger: logger, diff --git a/sdk/plugin/storage_test.go b/sdk/plugin/storage_test.go index fa61dece35..21fc4e7ecf 100644 --- a/sdk/plugin/storage_test.go +++ b/sdk/plugin/storage_test.go @@ -26,5 +26,4 @@ func TestStorage_GRPC(t *testing.T) { testStorage := &GRPCStorageClient{client: pb.NewStorageClient(client)} logical.TestStorage(t, testStorage) - } diff --git a/sdk/testing/stepwise/environments/docker/environment.go b/sdk/testing/stepwise/environments/docker/environment.go index 7cc0191745..18c93b3775 100644 --- a/sdk/testing/stepwise/environments/docker/environment.go +++ b/sdk/testing/stepwise/environments/docker/environment.go @@ -90,7 +90,7 @@ func (dc *DockerCluster) Teardown() error { } } - //clean up networks + // clean up networks if dc.networkID != "" { cli, err := docker.NewClientWithOpts(docker.FromEnv, docker.WithVersion(dockerVersion)) if err != nil { @@ -349,7 +349,7 @@ func (dc *DockerCluster) setupCA(opts *DockerClusterOptions) error { dc.CACertPEM = pem.EncodeToMemory(CACertPEMBlock) dc.CACertPEMFile = filepath.Join(dc.tmpDir, "ca", "ca.pem") - err = ioutil.WriteFile(dc.CACertPEMFile, dc.CACertPEM, 0755) + err = ioutil.WriteFile(dc.CACertPEMFile, dc.CACertPEM, 0o755) if err != nil { return err } @@ -415,13 +415,13 @@ func (n *dockerClusterNode) setupCert() error { }) n.ServerCertPEMFile = filepath.Join(n.WorkDir, "cert.pem") - err = ioutil.WriteFile(n.ServerCertPEMFile, n.ServerCertPEM, 0755) + err = ioutil.WriteFile(n.ServerCertPEMFile, n.ServerCertPEM, 0o755) if err != nil { return err } n.ServerKeyPEMFile = filepath.Join(n.WorkDir, "key.pem") - err = ioutil.WriteFile(n.ServerKeyPEMFile, n.ServerKeyPEM, 0755) + err = ioutil.WriteFile(n.ServerKeyPEMFile, n.ServerKeyPEM, 0o755) if err != nil { return err } @@ -573,7 +573,7 @@ func (n *dockerClusterNode) start(cli *docker.Client, caDir, netName string, net return err } - err = ioutil.WriteFile(filepath.Join(n.WorkDir, "local.json"), cfgJSON, 0644) + err = ioutil.WriteFile(filepath.Join(n.WorkDir, "local.json"), cfgJSON, 0o644) if err != nil { return err } @@ -693,7 +693,7 @@ var DefaultNumCores = 1 func (cluster *DockerCluster) setupDockerCluster(opts *DockerClusterOptions) error { if opts != nil && opts.tmpDir != "" { if _, err := os.Stat(opts.tmpDir); os.IsNotExist(err) { - if err := os.MkdirAll(opts.tmpDir, 0700); err != nil { + if err := os.MkdirAll(opts.tmpDir, 0o700); err != nil { return err } } @@ -706,7 +706,7 @@ func 
(cluster *DockerCluster) setupDockerCluster(opts *DockerClusterOptions) err cluster.tmpDir = tempDir } caDir := filepath.Join(cluster.tmpDir, "ca") - if err := os.MkdirAll(caDir, 0755); err != nil { + if err := os.MkdirAll(caDir, 0o755); err != nil { return err } @@ -729,7 +729,7 @@ func (cluster *DockerCluster) setupDockerCluster(opts *DockerClusterOptions) err WorkDir: filepath.Join(cluster.tmpDir, nodeID), } cluster.ClusterNodes = append(cluster.ClusterNodes, node) - if err := os.MkdirAll(node.WorkDir, 0700); err != nil { + if err := os.MkdirAll(node.WorkDir, 0o700); err != nil { return err } } diff --git a/sdk/testing/stepwise/helpers.go b/sdk/testing/stepwise/helpers.go index 2a48c6834b..81f173990e 100644 --- a/sdk/testing/stepwise/helpers.go +++ b/sdk/testing/stepwise/helpers.go @@ -8,7 +8,6 @@ import ( "encoding/pem" "errors" "fmt" - "github.com/hashicorp/errwrap" "io" "io/ioutil" "os" @@ -16,6 +15,8 @@ import ( "path" "strings" "sync" + + "github.com/hashicorp/errwrap" ) const pluginPrefix = "vault-plugin-" diff --git a/sdk/testing/stepwise/stepwise_test.go b/sdk/testing/stepwise/stepwise_test.go index 799d041fdd..d70437fb6f 100644 --- a/sdk/testing/stepwise/stepwise_test.go +++ b/sdk/testing/stepwise/stepwise_test.go @@ -38,7 +38,7 @@ func TestStepwise_Run_SkipIfNotAcc(t *testing.T) { defer os.Setenv(TestEnvVar, "1") skipCase := Case{ Environment: new(mockEnvironment), - Steps: []Step{Step{}}, + Steps: []Step{{}}, } expected := mockT{ diff --git a/serviceregistration/consul/consul_service_registration.go b/serviceregistration/consul/consul_service_registration.go index 65f1a970a6..2a0d8394c5 100644 --- a/serviceregistration/consul/consul_service_registration.go +++ b/serviceregistration/consul/consul_service_registration.go @@ -50,9 +50,7 @@ const ( reconcileTimeout = 60 * time.Second ) -var ( - hostnameRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`) -) +var hostnameRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`) // serviceRegistration is a ServiceRegistration that advertises the state of // Vault to Consul. 
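The file-mode changes in the docker environment hunks above (0644 to 0o644, 0700 to 0o700, 0755 to 0o755) only switch to Go's explicit 0o octal prefix; the numeric values are unchanged, as this small check shows (illustrative only, not part of the patch):

package main

import "fmt"

func main() {
	// The 0o prefix makes the octal base explicit; the value is identical.
	fmt.Println(0o644 == 0644, 0o700 == 0700, 0o755 == 0755) // true true true
	fmt.Printf("%d\n", 0o644)                                // 420 in decimal
}
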
diff --git a/serviceregistration/consul/consul_service_registration_test.go b/serviceregistration/consul/consul_service_registration_test.go index 5f065818bc..ba0822f3db 100644 --- a/serviceregistration/consul/consul_service_registration_test.go +++ b/serviceregistration/consul/consul_service_registration_test.go @@ -121,8 +121,8 @@ func TestConsul_ServiceRegistration(t *testing.T) { defer core.Shutdown() waitForServices(t, map[string][]string{ - "consul": []string{}, - "vault": []string{"standby"}, + "consul": {}, + "vault": {"standby"}, }) // Initialize and unseal the core @@ -140,8 +140,8 @@ func TestConsul_ServiceRegistration(t *testing.T) { vault.TestWaitActive(t, core) waitForServices(t, map[string][]string{ - "consul": []string{}, - "vault": []string{"active", "initialized"}, + "consul": {}, + "vault": {"active", "initialized"}, }) } diff --git a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go index bca8394b06..9eb031a362 100644 --- a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go +++ b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go @@ -30,10 +30,12 @@ import ( "github.com/hashicorp/vault/serviceregistration/kubernetes/client" ) -var callToMake string -var patchesToAdd string -var namespace string -var podName string +var ( + callToMake string + patchesToAdd string + namespace string + podName string +) func init() { flag.StringVar(&callToMake, "call", "", `the call to make: 'get-pod' or 'patch-pod'`) diff --git a/shamir/shamir.go b/shamir/shamir.go index f3fe4deb8b..9f706d7ae7 100644 --- a/shamir/shamir.go +++ b/shamir/shamir.go @@ -90,7 +90,7 @@ func div(a, b uint8) uint8 { log_a := logTable[a] log_b := logTable[b] - diff := ((int(log_a) - int(log_b))+255)%255 + diff := ((int(log_a) - int(log_b)) + 255) % 255 ret := int(expTable[diff]) diff --git a/shamir/shamir_test.go b/shamir/shamir_test.go index 09f90d5fc2..90a7c371c2 100644 --- a/shamir/shamir_test.go +++ b/shamir/shamir_test.go @@ -63,7 +63,7 @@ func TestCombine_invalid(t *testing.T) { t.Fatalf("should err") } - //Too short + // Too short parts = [][]byte{ []byte("f"), []byte("b"), diff --git a/shamir/tables.go b/shamir/tables.go index 76c245e79d..07ec4e5283 100644 --- a/shamir/tables.go +++ b/shamir/tables.go @@ -37,7 +37,8 @@ var ( 0x22, 0x88, 0x94, 0xce, 0x19, 0x01, 0x71, 0x4c, 0xa5, 0xe3, 0xc5, 0x31, 0xbb, 0xcc, 0x1f, 0x2d, 0x3b, 0x52, 0x6f, 0xf6, 0x2e, 0x89, 0xf7, 0xc0, - 0x68, 0x1b, 0x64, 0x04, 0x06, 0xbf, 0x83, 0x38} + 0x68, 0x1b, 0x64, 0x04, 0x06, 0xbf, 0x83, 0x38, + } // expTable provides the anti-log or exponentiation value // for the equivalent index @@ -73,5 +74,6 @@ var ( 0x1e, 0xd3, 0x49, 0xe9, 0x9c, 0xc8, 0xc6, 0xc7, 0x22, 0x6e, 0xdb, 0x20, 0xbf, 0x43, 0x51, 0x52, 0x66, 0xb2, 0x76, 0x60, 0xda, 0xc5, 0xf3, 0xf6, - 0xaa, 0xcd, 0x9a, 0xa0, 0x75, 0x54, 0x0e, 0x01} + 0xaa, 0xcd, 0x9a, 0xa0, 0x75, 0x54, 0x0e, 0x01, + } ) diff --git a/tools/tools.go b/tools/tools.go index 641477e584..85f6d34a4c 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -14,19 +14,24 @@ package tools // see https://github.com/golang/protobuf/releases#v1.4-generated-code and // https://github.com/protocolbuffers/protobuf-go/releases/tag/v1.20.0#v1.20-grpc-support //go:generate go install github.com/golang/protobuf/protoc-gen-go -import _ "github.com/golang/protobuf/protoc-gen-go" - //go:generate go install golang.org/x/tools/cmd/goimports -import _ "golang.org/x/tools/cmd/goimports" - //go:generate go install github.com/mitchellh/gox 
-import _ "github.com/mitchellh/gox" - //go:generate go install github.com/hashicorp/go-bindata -import _ "github.com/hashicorp/go-bindata" - //go:generate go install github.com/elazarl/go-bindata-assetfs -import _ "github.com/elazarl/go-bindata-assetfs" - //go:generate go install github.com/client9/misspell/cmd/misspell -import _ "github.com/client9/misspell/cmd/misspell" +//go:generate go install mvdan.cc/gofumpt +import ( + _ "github.com/golang/protobuf/protoc-gen-go" + + _ "golang.org/x/tools/cmd/goimports" + + _ "github.com/mitchellh/gox" + + _ "github.com/hashicorp/go-bindata" + + _ "github.com/elazarl/go-bindata-assetfs" + + _ "github.com/client9/misspell/cmd/misspell" + + _ "mvdan.cc/gofumpt" +) diff --git a/vault/acl.go b/vault/acl.go index 1313fa33b5..040258f6f3 100644 --- a/vault/acl.go +++ b/vault/acl.go @@ -681,7 +681,7 @@ func (c *Core) performPolicyChecks(ctx context.Context, acl *ACL, te *logical.To ret.RootPrivs = ret.ACLResults.RootPrivs // Root is always allowed; skip Sentinel/MFA checks if ret.ACLResults.IsRoot { - //logger.Warn("token is root, skipping checks") + // logger.Warn("token is root, skipping checks") ret.Allowed = true return ret } diff --git a/vault/acl_test.go b/vault/acl_test.go index 296e104dbe..29c690e01a 100644 --- a/vault/acl_test.go +++ b/vault/acl_test.go @@ -20,7 +20,7 @@ func TestACL_NewACL(t *testing.T) { func testNewACL(t *testing.T, ns *namespace.Namespace) { ctx := namespace.ContextWithNamespace(context.Background(), ns) - policy := []*Policy{&Policy{Name: "root"}} + policy := []*Policy{{Name: "root"}} _, err := NewACL(ctx, policy) switch ns.ID { case namespace.RootNamespaceID: @@ -100,7 +100,7 @@ path "secret/split/definition" { func TestACL_Capabilities(t *testing.T) { t.Run("root-ns", func(t *testing.T) { t.Parallel() - policy := []*Policy{&Policy{Name: "root"}} + policy := []*Policy{{Name: "root"}} ctx := namespace.RootContext(nil) acl, err := NewACL(ctx, policy) if err != nil { @@ -158,7 +158,7 @@ func TestACL_Root(t *testing.T) { func testACLRoot(t *testing.T, ns *namespace.Namespace) { // Create the root policy ACL. Always create on root namespace regardless of // which namespace to ACL check on. 
- policy := []*Policy{&Policy{Name: "root"}} + policy := []*Policy{{Name: "root"}} acl, err := NewACL(namespace.RootContext(nil), policy) if err != nil { t.Fatalf("err: %v", err) @@ -399,14 +399,14 @@ func testACLPolicyMerge(t *testing.T, ns *namespace.Namespace) { } tcases := []tcase{ - {"foo/bar", nil, nil, nil, map[string][]interface{}{"zip": []interface{}{}, "baz": []interface{}{}}, []string{"baz"}}, - {"hello/universe", createDuration(50), createDuration(200), map[string][]interface{}{"foo": []interface{}{}, "bar": []interface{}{}}, nil, []string{"foo", "bar"}}, - {"allow/all", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil, nil}, - {"allow/all1", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil, nil}, - {"deny/all", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}, nil}, - {"deny/all1", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}, nil}, - {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}, nil}, - {"value/empty", nil, nil, map[string][]interface{}{"empty": []interface{}{}}, map[string][]interface{}{"empty": []interface{}{}}, nil}, + {"foo/bar", nil, nil, nil, map[string][]interface{}{"zip": {}, "baz": {}}, []string{"baz"}}, + {"hello/universe", createDuration(50), createDuration(200), map[string][]interface{}{"foo": {}, "bar": {}}, nil, []string{"foo", "bar"}}, + {"allow/all", nil, nil, map[string][]interface{}{"*": {}, "test": {}, "test1": {"foo"}}, nil, nil}, + {"allow/all1", nil, nil, map[string][]interface{}{"*": {}, "test": {}, "test1": {"foo"}}, nil, nil}, + {"deny/all", nil, nil, nil, map[string][]interface{}{"*": {}, "test": {}}, nil}, + {"deny/all1", nil, nil, nil, map[string][]interface{}{"*": {}, "test": {}}, nil}, + {"value/merge", nil, nil, map[string][]interface{}{"test": {3, 4, 1, 2}}, map[string][]interface{}{"test": {3, 4, 1, 2}}, nil}, + {"value/empty", nil, nil, map[string][]interface{}{"empty": {}}, map[string][]interface{}{"empty": {}}, nil}, } for _, tc := range tcases { @@ -914,7 +914,7 @@ path "foo/bar" { } ` -//test merging +// test merging var mergingPolicies = ` name = "ops" path "foo/bar" { @@ -1036,7 +1036,7 @@ path "value/empty" { } ` -//allow operation testing +// allow operation testing var permissionsPolicy = ` name = "dev" path "dev/*" { @@ -1126,7 +1126,7 @@ path "var/req" { } ` -//allow operation testing +// allow operation testing var valuePermissionsPolicy = ` name = "op" path "dev/*" { diff --git a/vault/activity/query_test.go b/vault/activity/query_test.go index 47a99f8b31..d9a4e4ec39 100644 --- a/vault/activity/query_test.go +++ b/vault/activity/query_test.go @@ -77,7 +77,6 @@ func TestQueryStore_Inventory(t *testing.T) { if !reflect.DeepEqual(storedEndTimes, expected) { t.Fatalf("end time mismatch, expected %v got %v", expected, storedEndTimes) } - } func TestQueryStore_MarshalDemarshal(t *testing.T) { @@ -88,12 +87,12 @@ func TestQueryStore_MarshalDemarshal(t *testing.T) { StartTime: tsStart, EndTime: tsEnd, Namespaces: []*NamespaceRecord{ - &NamespaceRecord{ + { NamespaceID: "root", Entities: 20, NonEntityTokens: 42, }, - &NamespaceRecord{ + { NamespaceID: "yzABC", Entities: 15, NonEntityTokens: 31, @@ -158,7 +157,7 @@ func TestQueryStore_TimeRanges(t *testing.T) { StartTime: period.Begin, EndTime: e, Namespaces: 
[]*NamespaceRecord{ - &NamespaceRecord{ + { NamespaceID: "root", Entities: 17, NonEntityTokens: 31, diff --git a/vault/activity_log.go b/vault/activity_log.go index 96e390cd40..3ead8908ab 100644 --- a/vault/activity_log.go +++ b/vault/activity_log.go @@ -1177,7 +1177,6 @@ func (a *ActivityLog) activeFragmentWorker() { endOfMonth.Reset(delta) } } - } type ActivityIntentLog struct { @@ -1358,7 +1357,6 @@ func (a *ActivityLog) receivedFragment(fragment *activity.LogFragment) { a.standbyFragmentsReceived = append(a.standbyFragmentsReceived, fragment) // TODO: check if current segment is full and should be written - } type ClientCountResponse struct { diff --git a/vault/activity_log_test.go b/vault/activity_log_test.go index 723053e431..c0a672ca5e 100644 --- a/vault/activity_log_test.go +++ b/vault/activity_log_test.go @@ -341,12 +341,12 @@ func TestActivityLog_ReceivedFragment(t *testing.T) { } entityRecords := []*activity.EntityRecord{ - &activity.EntityRecord{ + { EntityID: ids[0], NamespaceID: "root", Timestamp: time.Now().Unix(), }, - &activity.EntityRecord{ + { EntityID: ids[1], NamespaceID: "root", Timestamp: time.Now().Unix(), @@ -386,7 +386,6 @@ func TestActivityLog_availableLogsEmptyDirectory(t *testing.T) { core, _, _ := TestCoreUnsealed(t) a := core.activityLog times, err := a.availableLogs(context.Background()) - if err != nil { t.Fatalf("error getting start_time(s) for empty activity log") } @@ -966,12 +965,12 @@ func TestActivityLog_loadCurrentEntitySegment(t *testing.T) { // setup in-storage data to load for testing entityRecords := []*activity.EntityRecord{ - &activity.EntityRecord{ + { EntityID: "11111111-1111-1111-1111-111111111111", NamespaceID: "root", Timestamp: time.Now().Unix(), }, - &activity.EntityRecord{ + { EntityID: "22222222-2222-2222-2222-222222222222", NamespaceID: "root", Timestamp: time.Now().Unix(), @@ -1066,12 +1065,12 @@ func TestActivityLog_loadPriorEntitySegment(t *testing.T) { // setup in-storage data to load for testing entityRecords := []*activity.EntityRecord{ - &activity.EntityRecord{ + { EntityID: "11111111-1111-1111-1111-111111111111", NamespaceID: "root", Timestamp: time.Now().Unix(), }, - &activity.EntityRecord{ + { EntityID: "22222222-2222-2222-2222-222222222222", NamespaceID: "root", Timestamp: time.Now().Unix(), @@ -1253,7 +1252,6 @@ func TestActivityLog_StopAndRestart(t *testing.T) { if err != nil { t.Fatal(err) } - } // :base: is the timestamp to start from for the setup logic (use to simulate newest log from past or future) @@ -1269,17 +1267,17 @@ func setupActivityRecordsInStorage(t *testing.T, base time.Time, includeEntities var entityRecords []*activity.EntityRecord if includeEntities { entityRecords = []*activity.EntityRecord{ - &activity.EntityRecord{ + { EntityID: "11111111-1111-1111-1111-111111111111", NamespaceID: "root", Timestamp: time.Now().Unix(), }, - &activity.EntityRecord{ + { EntityID: "22222222-2222-2222-2222-222222222222", NamespaceID: "root", Timestamp: time.Now().Unix(), }, - &activity.EntityRecord{ + { EntityID: "33333333-2222-2222-2222-222222222222", NamespaceID: "root", Timestamp: time.Now().Unix(), @@ -2214,11 +2212,9 @@ func TestActivityLog_Precompute(t *testing.T) { g.Name, g.NamespaceLabel) } } - } -type BlockingInmemStorage struct { -} +type BlockingInmemStorage struct{} func (b *BlockingInmemStorage) List(ctx context.Context, prefix string) ([]string, error) { <-ctx.Done() @@ -2265,7 +2261,6 @@ func TestActivityLog_PrecomputeCancel(t *testing.T) { case <-timeout: t.Fatalf("timeout waiting for worker to finish") } 
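// Not part of the patch: a hypothetical illustration of two rules visible in
// the activity log hunks here: an empty struct type collapses onto one line
// as struct{}, and blank lines sitting directly before a closing brace are
// dropped. Both spellings are valid Go; only the compact one survives the pass.
package main

import "fmt"

// Before:
//   type blockingStore struct {
//   }
// After:
type blockingStore struct{}

func noTrailingBlank() string {
	// A blank line between the final statement and the closing brace below
	// would be removed by the pass.
	return "ok"
}

func main() {
	fmt.Println(blockingStore{}, noTrailingBlank())
}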
- } func TestActivityLog_NextMonthStart(t *testing.T) { @@ -2318,7 +2313,6 @@ func waitForRetentionWorkerToFinish(t *testing.T, a *ActivityLog) { case <-timeout: t.Fatal("timeout waiting for retention worker to finish") } - } func TestActivityLog_Deletion(t *testing.T) { @@ -2433,7 +2427,6 @@ func TestActivityLog_Deletion(t *testing.T) { checkAbsent(i) } checkPresent(21) - } func TestActivityLog_partialMonthClientCount(t *testing.T) { diff --git a/vault/activity_log_testing_util.go b/vault/activity_log_testing_util.go index a37e1f90c9..ba9fe6542b 100644 --- a/vault/activity_log_testing_util.go +++ b/vault/activity_log_testing_util.go @@ -15,9 +15,9 @@ func (c *Core) InjectActivityLogDataThisMonth(t *testing.T) (map[string]struct{} t.Helper() activeEntities := map[string]struct{}{ - "entity0": struct{}{}, - "entity1": struct{}{}, - "entity2": struct{}{}, + "entity0": {}, + "entity1": {}, + "entity2": {}, } tokens := map[string]uint64{ "ns0": 5, @@ -66,7 +66,6 @@ func WriteToStorage(t *testing.T, c *Core, path string, data []byte) { Key: path, Value: data, }) - if err != nil { t.Fatalf("Failed to write %s\nto %s\nerror: %v", data, path, err) } diff --git a/vault/audit.go b/vault/audit.go index acf98a3885..5b30bc4d4a 100644 --- a/vault/audit.go +++ b/vault/audit.go @@ -35,10 +35,8 @@ const ( auditTableType = "audit" ) -var ( - // loadAuditFailed if loading audit tables encounters an error - errLoadAuditFailed = errors.New("failed to setup audit table") -) +// loadAuditFailed if loading audit tables encounters an error +var errLoadAuditFailed = errors.New("failed to setup audit table") func (c *Core) generateAuditTestProbe() (*logical.LogInput, error) { requestId, err := uuid.GenerateUUID() diff --git a/vault/audit_test.go b/vault/audit_test.go index 9287a3428d..ebd7de504d 100644 --- a/vault/audit_test.go +++ b/vault/audit_test.go @@ -2,14 +2,13 @@ package vault import ( "context" + "errors" "fmt" "reflect" "strings" "testing" "time" - "errors" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" @@ -118,13 +117,13 @@ func TestCore_EnableAudit_MixedFailures(t *testing.T) { c.audit = &MountTable{ Type: auditTableType, Entries: []*MountEntry{ - &MountEntry{ + { Table: auditTableType, Path: "noop/", Type: "noop", UUID: "abcd", }, - &MountEntry{ + { Table: auditTableType, Path: "noop2/", Type: "noop", @@ -172,7 +171,7 @@ func TestCore_EnableAudit_Local(t *testing.T) { c.audit = &MountTable{ Type: auditTableType, Entries: []*MountEntry{ - &MountEntry{ + { Table: auditTableType, Path: "noop/", Type: "noop", @@ -181,7 +180,7 @@ func TestCore_EnableAudit_Local(t *testing.T) { NamespaceID: namespace.RootNamespaceID, namespace: namespace.RootNamespace, }, - &MountEntry{ + { Table: auditTableType, Path: "noop2/", Type: "noop", @@ -567,9 +566,9 @@ func TestAuditBroker_AuditHeaders(t *testing.T) { Operation: logical.ReadOperation, Path: "sys/mounts", Headers: map[string][]string{ - "X-Test-Header": []string{"foo"}, - "X-Vault-Header": []string{"bar"}, - "Content-Type": []string{"baz"}, + "X-Test-Header": {"foo"}, + "X-Vault-Header": {"bar"}, + "Content-Type": {"baz"}, }, } respErr := fmt.Errorf("permission denied") @@ -598,8 +597,8 @@ func TestAuditBroker_AuditHeaders(t *testing.T) { } expected := map[string][]string{ - "x-test-header": []string{"foo"}, - "x-vault-header": []string{"bar"}, + "x-test-header": {"foo"}, + "x-vault-header": {"bar"}, } for _, a := range []*NoopAudit{a1, a2} { diff --git a/vault/audited_headers_test.go 
b/vault/audited_headers_test.go index 11abb34574..b853ae1d70 100644 --- a/vault/audited_headers_test.go +++ b/vault/audited_headers_test.go @@ -51,7 +51,7 @@ func testAuditedHeadersConfig_Add(t *testing.T, conf *AuditedHeadersConfig) { } expected := map[string]*auditedHeaderSettings{ - "x-test-header": &auditedHeaderSettings{ + "x-test-header": { HMAC: false, }, } @@ -92,7 +92,6 @@ func testAuditedHeadersConfig_Add(t *testing.T, conf *AuditedHeadersConfig) { if !reflect.DeepEqual(headers, expected) { t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers) } - } func testAuditedHeadersConfig_Remove(t *testing.T, conf *AuditedHeadersConfig) { @@ -118,7 +117,7 @@ func testAuditedHeadersConfig_Remove(t *testing.T, conf *AuditedHeadersConfig) { } expected := map[string]*auditedHeaderSettings{ - "x-vault-header": &auditedHeaderSettings{ + "x-vault-header": { HMAC: true, }, } @@ -162,9 +161,9 @@ func TestAuditedHeadersConfig_ApplyConfig(t *testing.T) { conf.add(context.Background(), "X-Vault-HeAdEr", true) reqHeaders := map[string][]string{ - "X-Test-Header": []string{"foo"}, - "X-Vault-Header": []string{"bar", "bar"}, - "Content-Type": []string{"json"}, + "X-Test-Header": {"foo"}, + "X-Vault-Header": {"bar", "bar"}, + "Content-Type": {"json"}, } hashFunc := func(ctx context.Context, s string) (string, error) { return "hashed", nil } @@ -175,25 +174,24 @@ func TestAuditedHeadersConfig_ApplyConfig(t *testing.T) { } expected := map[string][]string{ - "x-test-header": []string{"foo"}, - "x-vault-header": []string{"hashed", "hashed"}, + "x-test-header": {"foo"}, + "x-vault-header": {"hashed", "hashed"}, } if !reflect.DeepEqual(result, expected) { t.Fatalf("Expected headers did not match actual: Expected %#v\n Got %#v\n", expected, result) } - //Make sure we didn't edit the reqHeaders map + // Make sure we didn't edit the reqHeaders map reqHeadersCopy := map[string][]string{ - "X-Test-Header": []string{"foo"}, - "X-Vault-Header": []string{"bar", "bar"}, - "Content-Type": []string{"json"}, + "X-Test-Header": {"foo"}, + "X-Vault-Header": {"bar", "bar"}, + "Content-Type": {"json"}, } if !reflect.DeepEqual(reqHeaders, reqHeadersCopy) { t.Fatalf("Req headers were changed, expected %#v\n got %#v", reqHeadersCopy, reqHeaders) } - } func BenchmarkAuditedHeaderConfig_ApplyConfig(b *testing.B) { @@ -203,14 +201,14 @@ func BenchmarkAuditedHeaderConfig_ApplyConfig(b *testing.B) { } conf.Headers = map[string]*auditedHeaderSettings{ - "X-Test-Header": &auditedHeaderSettings{false}, - "X-Vault-Header": &auditedHeaderSettings{true}, + "X-Test-Header": {false}, + "X-Vault-Header": {true}, } reqHeaders := map[string][]string{ - "X-Test-Header": []string{"foo"}, - "X-Vault-Header": []string{"bar", "bar"}, - "Content-Type": []string{"json"}, + "X-Test-Header": {"foo"}, + "X-Vault-Header": {"bar", "bar"}, + "Content-Type": {"json"}, } salter, err := salt.NewSalt(context.Background(), nil, nil) diff --git a/vault/auth_test.go b/vault/auth_test.go index 28b69057ec..4ee314d04c 100644 --- a/vault/auth_test.go +++ b/vault/auth_test.go @@ -212,7 +212,7 @@ func TestCore_EnableCredential_Local(t *testing.T) { c.auth = &MountTable{ Type: credentialTableType, Entries: []*MountEntry{ - &MountEntry{ + { Table: credentialTableType, Path: "noop/", Type: "noop", @@ -222,7 +222,7 @@ func TestCore_EnableCredential_Local(t *testing.T) { NamespaceID: namespace.RootNamespaceID, namespace: namespace.RootNamespace, }, - &MountEntry{ + { Table: credentialTableType, Path: "noop2/", Type: "noop", @@ -515,7 
+515,8 @@ func TestCore_CredentialInitialize(t *testing.T) { backend := &InitializableBackend{ &NoopBackend{ BackendType: logical.TypeCredential, - }, false} + }, false, + } c, _, _ := TestCoreUnsealed(t) c.credentialBackends["initable"] = func(context.Context, *logical.BackendConfig) (logical.Backend, error) { @@ -540,7 +541,8 @@ func TestCore_CredentialInitialize(t *testing.T) { backend := &InitializableBackend{ &NoopBackend{ BackendType: logical.TypeCredential, - }, false} + }, false, + } c, _, _ := TestCoreUnsealed(t) c.credentialBackends["initable"] = func(context.Context, *logical.BackendConfig) (logical.Backend, error) { @@ -550,7 +552,7 @@ func TestCore_CredentialInitialize(t *testing.T) { c.auth = &MountTable{ Type: credentialTableType, Entries: []*MountEntry{ - &MountEntry{ + { Table: credentialTableType, Path: "foo/", Type: "initable", diff --git a/vault/barrier_aes_gcm.go b/vault/barrier_aes_gcm.go index 9d9a917846..edce5cef27 100644 --- a/vault/barrier_aes_gcm.go +++ b/vault/barrier_aes_gcm.go @@ -1168,7 +1168,6 @@ func (b *AESGCMBarrier) CheckBarrierAutoRotate(ctx context.Context) (string, err } return "", nil }() - if err != nil { return "", err } diff --git a/vault/barrier_aes_gcm_test.go b/vault/barrier_aes_gcm_test.go index ff48cbaf59..8200af2c63 100644 --- a/vault/barrier_aes_gcm_test.go +++ b/vault/barrier_aes_gcm_test.go @@ -15,9 +15,7 @@ import ( "github.com/hashicorp/vault/sdk/physical/inmem" ) -var ( - logger = logging.NewVaultLogger(log.Trace) -) +var logger = logging.NewVaultLogger(log.Trace) // mockBarrier returns a physical backend, security barrier, and master key func mockBarrier(t testing.TB) (physical.Backend, SecurityBarrier, []byte) { diff --git a/vault/barrier_view_test.go b/vault/barrier_view_test.go index c13c836bf2..5179048f8f 100644 --- a/vault/barrier_view_test.go +++ b/vault/barrier_view_test.go @@ -182,12 +182,12 @@ func TestBarrierView_Scan(t *testing.T) { expect := []string{} ent := []*logical.StorageEntry{ - &logical.StorageEntry{Key: "foo", Value: []byte("test")}, - &logical.StorageEntry{Key: "zip", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")}, + {Key: "foo", Value: []byte("test")}, + {Key: "zip", Value: []byte("test")}, + {Key: "foo/bar", Value: []byte("test")}, + {Key: "foo/zap", Value: []byte("test")}, + {Key: "foo/bar/baz", Value: []byte("test")}, + {Key: "foo/bar/zoo", Value: []byte("test")}, } for _, e := range ent { @@ -220,12 +220,12 @@ func TestBarrierView_CollectKeys(t *testing.T) { expect := []string{} ent := []*logical.StorageEntry{ - &logical.StorageEntry{Key: "foo", Value: []byte("test")}, - &logical.StorageEntry{Key: "zip", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")}, + {Key: "foo", Value: []byte("test")}, + {Key: "zip", Value: []byte("test")}, + {Key: "foo/bar", Value: []byte("test")}, + {Key: "foo/zap", Value: []byte("test")}, + {Key: "foo/bar/baz", Value: []byte("test")}, + {Key: "foo/bar/zoo", Value: []byte("test")}, } for _, e := range ent { @@ -254,12 +254,12 @@ func TestBarrierView_ClearView(t *testing.T) { expect := []string{} ent := 
[]*logical.StorageEntry{ - &logical.StorageEntry{Key: "foo", Value: []byte("test")}, - &logical.StorageEntry{Key: "zip", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")}, - &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")}, + {Key: "foo", Value: []byte("test")}, + {Key: "zip", Value: []byte("test")}, + {Key: "foo/bar", Value: []byte("test")}, + {Key: "foo/zap", Value: []byte("test")}, + {Key: "foo/bar/baz", Value: []byte("test")}, + {Key: "foo/bar/zoo", Value: []byte("test")}, } for _, e := range ent { diff --git a/vault/cluster/inmem_layer.go b/vault/cluster/inmem_layer.go index 68572e58fc..e65220f8b2 100644 --- a/vault/cluster/inmem_layer.go +++ b/vault/cluster/inmem_layer.go @@ -328,6 +328,7 @@ type inmemAddr struct { func (a inmemAddr) Network() string { return "inmem" } + func (a inmemAddr) String() string { return a.addr } diff --git a/vault/cluster_test.go b/vault/cluster_test.go index e4cea0b905..70af52c841 100644 --- a/vault/cluster_test.go +++ b/vault/cluster_test.go @@ -18,9 +18,7 @@ import ( "github.com/hashicorp/vault/vault/cluster" ) -var ( - clusterTestPausePeriod = 2 * time.Second -) +var clusterTestPausePeriod = 2 * time.Second func TestClusterFetching(t *testing.T) { c, _, _ := TestCoreUnsealed(t) diff --git a/vault/core.go b/vault/core.go index 74199979c4..13966938f7 100644 --- a/vault/core.go +++ b/vault/core.go @@ -680,7 +680,6 @@ type CoreConfig struct { // GetServiceRegistration returns the config's ServiceRegistration, or nil if it does // not exist. func (c *CoreConfig) GetServiceRegistration() sr.ServiceRegistration { - // Check whether there is a ServiceRegistration explicitly configured if c.ServiceRegistration != nil { return c.ServiceRegistration @@ -1916,7 +1915,6 @@ func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c if err := c.persistFeatureFlags(ctx); err != nil { return err } - } if c.autoRotateCancel == nil { @@ -2001,7 +1999,7 @@ func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c } if !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary) { - //Cannot do this above, as we need other resources like mounts to be setup + // Cannot do this above, as we need other resources like mounts to be setup if err := c.setupPluginReload(); err != nil { return err } diff --git a/vault/core_test.go b/vault/core_test.go index 7b05a3356c..b26b000f2f 100644 --- a/vault/core_test.go +++ b/vault/core_test.go @@ -20,10 +20,8 @@ import ( "github.com/hashicorp/vault/sdk/physical/inmem" ) -var ( - // invalidKey is used to test Unseal - invalidKey = []byte("abcdefghijklmnopqrstuvwxyz")[:17] -) +// invalidKey is used to test Unseal +var invalidKey = []byte("abcdefghijklmnopqrstuvwxyz")[:17] func TestNewCore_badRedirectAddr(t *testing.T) { logger = logging.NewVaultLogger(log.Trace) @@ -2066,7 +2064,7 @@ func TestCore_EnableDisableCred_WithLease(t *testing.T) { return noopBack, nil } - var secretWritingPolicy = ` + secretWritingPolicy := ` name = "admins" path "secret/*" { capabilities = ["update", "create", "read"] @@ -2324,10 +2322,10 @@ func TestCore_HandleRequest_Headers(t *testing.T) { Path: "foo/test", ClientToken: root, Headers: map[string][]string{ - "Should-Passthrough": []string{"foo"}, - "Should-Passthrough-Case-Insensitive": []string{"baz"}, - "Should-Not-Passthrough": 
[]string{"bar"}, - consts.AuthHeaderName: []string{"nope"}, + "Should-Passthrough": {"foo"}, + "Should-Passthrough-Case-Insensitive": {"baz"}, + "Should-Not-Passthrough": {"bar"}, + consts.AuthHeaderName: {"nope"}, }, } _, err = c.HandleRequest(namespace.RootContext(nil), lreq) @@ -2402,7 +2400,7 @@ func TestCore_HandleRequest_Headers_denyList(t *testing.T) { Path: "foo/test", ClientToken: root, Headers: map[string][]string{ - consts.AuthHeaderName: []string{"foo"}, + consts.AuthHeaderName: {"foo"}, }, } _, err = c.HandleRequest(namespace.RootContext(nil), lreq) @@ -2515,7 +2513,6 @@ func (m *mockServiceRegistration) NotifyInitializedStateChange(isInitialized boo // TestCore_ServiceRegistration tests whether standalone ServiceRegistration works func TestCore_ServiceRegistration(t *testing.T) { - // Make a mock service discovery sr := &mockServiceRegistration{} diff --git a/vault/core_util.go b/vault/core_util.go index 3a02f1c484..d9f43a285d 100644 --- a/vault/core_util.go +++ b/vault/core_util.go @@ -19,8 +19,10 @@ const ( activityLogEnabledDefaultValue = "default-disabled" ) -type entCore struct{} -type entCoreConfig struct{} +type ( + entCore struct{} + entCoreConfig struct{} +) func (e entCoreConfig) Clone() entCoreConfig { return entCoreConfig{} @@ -52,6 +54,7 @@ func coreInit(c *Core, conf *CoreConfig) error { } return nil } + func (c *Core) setupReplicationResolverHandler() error { return nil } diff --git a/vault/counters.go b/vault/counters.go index f9dfb32261..c0a914a8eb 100644 --- a/vault/counters.go +++ b/vault/counters.go @@ -192,7 +192,6 @@ type TokenCounter struct { // countActiveTokens returns the number of active tokens func (c *Core) countActiveTokens(ctx context.Context) (*ActiveTokens, error) { - // Get all of the namespaces ns := c.collectNamespaces() @@ -227,7 +226,6 @@ type EntityCounter struct { // countActiveEntities returns the number of active entities func (c *Core) countActiveEntities(ctx context.Context) (*ActiveEntities, error) { - count, err := c.identityStore.countEntities() if err != nil { return nil, err diff --git a/vault/counters_test.go b/vault/counters_test.go index c62ff54ed7..e76958158a 100644 --- a/vault/counters_test.go +++ b/vault/counters_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -//noinspection SpellCheckingInspection +// noinspection SpellCheckingInspection func testParseTime(t *testing.T, format, timeval string) time.Time { t.Helper() tm, err := time.Parse(format, timeval) diff --git a/vault/diagnose/tls.go b/vault/diagnose/tls.go index 51272a409a..c54d49916d 100644 --- a/vault/diagnose/tls.go +++ b/vault/diagnose/tls.go @@ -22,7 +22,6 @@ func ListenerChecks(listeners []listenerutil.Listener) error { // TLSChecks contains manual error checks against the TLS configuration func TLSFileChecks(certFilePath, keyFilePath string) error { - // LoadX509KeyPair will check if the key/cert information can be loaded from files, // if they exist with keys and certs of the same algorithm type, if there // is an unknown algorithm type being used, and if the files have trailing diff --git a/vault/dynamic_system_view_test.go b/vault/dynamic_system_view_test.go index b5a90f1d76..2b5044d9f4 100644 --- a/vault/dynamic_system_view_test.go +++ b/vault/dynamic_system_view_test.go @@ -290,12 +290,15 @@ type fakeBarrier struct { func (b fakeBarrier) Get(context.Context, string) (*logical.StorageEntry, error) { return b.getEntry, b.getErr } + func (b fakeBarrier) List(context.Context, string) ([]string, error) { return nil, 
fmt.Errorf("not implemented") } + func (b fakeBarrier) Put(context.Context, *logical.StorageEntry) error { return fmt.Errorf("not implemented") } + func (b fakeBarrier) Delete(context.Context, string) error { return fmt.Errorf("not implemented") } diff --git a/vault/expiration.go b/vault/expiration.go index a555fc41ce..457690110e 100644 --- a/vault/expiration.go +++ b/vault/expiration.go @@ -2184,7 +2184,6 @@ func (m *ExpirationManager) leaseAggregationMetrics(ctx context.Context, consts return true } }) - if err != nil { return []metricsutil.GaugeLabelValues{}, suppressRestoreModeError(err) } @@ -2222,9 +2221,7 @@ func (m *ExpirationManager) leaseAggregationMetrics(ctx context.Context, consts // type (though most likely we would only call this from within the "vault" core package.) type ExpirationWalkFunction = func(leaseID string, auth *logical.Auth, path string) bool -var ( - ErrInRestoreMode = errors.New("expiration manager in restore mode") -) +var ErrInRestoreMode = errors.New("expiration manager in restore mode") // WalkTokens extracts the Auth structure from leases corresponding to tokens. // Returning false from the walk function terminates the iteration. diff --git a/vault/expiration_integ_test.go b/vault/expiration_integ_test.go index a8271e4f51..2e2d54fcc9 100644 --- a/vault/expiration_integ_test.go +++ b/vault/expiration_integ_test.go @@ -164,5 +164,4 @@ func TestExpiration_RenewToken_TestCluster(t *testing.T) { if ttl < 4*time.Second { t.Fatal("expected ttl value to be around 5s") } - } diff --git a/vault/expiration_test.go b/vault/expiration_test.go index 77249a6c6d..dfa41c3cfc 100644 --- a/vault/expiration_test.go +++ b/vault/expiration_test.go @@ -27,9 +27,7 @@ import ( "github.com/hashicorp/vault/sdk/physical/inmem" ) -var ( - testImagePull sync.Once -) +var testImagePull sync.Once // mockExpiration returns a mock expiration manager func mockExpiration(t testing.TB) *ExpirationManager { @@ -219,7 +217,6 @@ func TestExpiration_Metrics(t *testing.T) { if !foundLabelOne || !foundLabelTwo { t.Errorf("One of the labels is missing") } - } func TestExpiration_Tidy(t *testing.T) { @@ -643,7 +640,6 @@ func TestExpiration_Restore(t *testing.T) { if exp.leaseCount != 0 { t.Fatalf("expected %v leases, got %v", 0, exp.leaseCount) } - } func TestExpiration_Register(t *testing.T) { @@ -1350,7 +1346,6 @@ func TestExpiration_RenewToken_period(t *testing.T) { if exp.leaseCount != 1 { t.Fatalf("expected %v leases, got %v", 1, exp.leaseCount) } - } func TestExpiration_RenewToken_period_backend(t *testing.T) { @@ -1476,7 +1471,6 @@ func TestExpiration_RenewToken_NotRenewable(t *testing.T) { if resp == nil { t.Fatal("expected a response") } - } func TestExpiration_Renew(t *testing.T) { @@ -2291,7 +2285,7 @@ func badRenewFactory(ctx context.Context, conf *logical.BackendConfig) (logical. 
}, Secrets: []*framework.Secret{ - &framework.Secret{ + { Type: "badRenewBackend", Revoke: func(context.Context, *logical.Request, *framework.FieldData) (*logical.Response, error) { return nil, fmt.Errorf("always errors") @@ -2413,7 +2407,6 @@ func TestExpiration_WalkTokens(t *testing.T) { tokenEntries = tokenEntries[:len(tokenEntries)-1] } - } func waitForRestore(t *testing.T, exp *ExpirationManager) { diff --git a/vault/external_tests/identity/entities_test.go b/vault/external_tests/identity/entities_test.go index 53b5cb5db2..0bb7b69907 100644 --- a/vault/external_tests/identity/entities_test.go +++ b/vault/external_tests/identity/entities_test.go @@ -375,5 +375,4 @@ func TestIdentityStore_EntityPoliciesInInitialAuth(t *testing.T) { if !strutil.EquivalentSlices(policies, []string{"foo", "bar"}) { t.Fatalf("policy mismatch, got policies: %v", policies) } - } diff --git a/vault/external_tests/metrics/core_metrics_int_test.go b/vault/external_tests/metrics/core_metrics_int_test.go index eb3435dbc8..7f4bd49e8e 100644 --- a/vault/external_tests/metrics/core_metrics_int_test.go +++ b/vault/external_tests/metrics/core_metrics_int_test.go @@ -72,7 +72,6 @@ func TestMountTableMetrics(t *testing.T) { if nonlocalLogicalMountsizeAfterMount <= nonlocalLogicalMountsize { t.Errorf("Mount size does not change after new mount is mounted") } - } func sysMetricsReq(client *api.Client, cluster *vault.TestCluster) (*SysMetricsJSON, error) { diff --git a/vault/external_tests/misc/recovery_test.go b/vault/external_tests/misc/recovery_test.go index e22a8df3d0..f43fff8d12 100644 --- a/vault/external_tests/misc/recovery_test.go +++ b/vault/external_tests/misc/recovery_test.go @@ -40,7 +40,7 @@ func TestRecovery(t *testing.T) { client := cluster.Cores[0].Client rootToken = client.Token() - var fooVal = map[string]interface{}{"bar": 1.0} + fooVal := map[string]interface{}{"bar": 1.0} _, err = client.Logical().Write("secret/foo", fooVal) if err != nil { t.Fatal(err) diff --git a/vault/external_tests/policy/acl_templating_test.go b/vault/external_tests/policy/acl_templating_test.go index 7872f6ad9d..396222363f 100644 --- a/vault/external_tests/policy/acl_templating_test.go +++ b/vault/external_tests/policy/acl_templating_test.go @@ -147,7 +147,7 @@ path "secret/{{ identity.groups.names.foobar.name}}/*" { } clientToken := secret.Auth.ClientToken - var tests = []struct { + tests := []struct { name string path string fail bool diff --git a/vault/external_tests/quotas/quotas_test.go b/vault/external_tests/quotas/quotas_test.go index 28b0021ba5..f37c0e8d20 100644 --- a/vault/external_tests/quotas/quotas_test.go +++ b/vault/external_tests/quotas/quotas_test.go @@ -24,16 +24,14 @@ path "/auth/token/lookup" { ` ) -var ( - coreConfig = &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": pki.Factory, - }, - CredentialBackends: map[string]logical.Factory{ - "userpass": userpass.Factory, - }, - } -) +var coreConfig = &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": pki.Factory, + }, + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, +} func setupMounts(t *testing.T, client *api.Client) { t.Helper() @@ -78,7 +76,6 @@ func setupMounts(t *testing.T, client *api.Client) { if err != nil { t.Fatal(err) } - } func teardownMounts(t *testing.T, client *api.Client) { @@ -197,7 +194,6 @@ func TestQuotas_RateLimit_DupPath(t *testing.T) { if err == nil { t.Fatal("Duplicated paths were accepted") } - } func TestQuotas_RateLimitQuota_ExemptPaths(t *testing.T) 
{ diff --git a/vault/external_tests/raft/raft_test.go b/vault/external_tests/raft/raft_test.go index 8320df2a0a..6ec1c6088b 100644 --- a/vault/external_tests/raft/raft_test.go +++ b/vault/external_tests/raft/raft_test.go @@ -5,8 +5,6 @@ import ( "context" "crypto/md5" "fmt" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/logical" "io/ioutil" "net/http" "strings" @@ -14,6 +12,9 @@ import ( "testing" "time" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/go-cleanhttp" uuid "github.com/hashicorp/go-uuid" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" @@ -47,7 +48,7 @@ func raftCluster(t testing.TB, ropts *RaftClusterOpts) *vault.TestCluster { DisableAutopilot: !ropts.EnableAutopilot, } - var opts = vault.TestClusterOptions{ + opts := vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, } opts.InmemClusterLayers = ropts.InmemCluster @@ -119,7 +120,7 @@ func TestRaft_RetryAutoJoin(t *testing.T) { func TestRaft_Retry_Join(t *testing.T) { t.Parallel() var conf vault.CoreConfig - var opts = vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} + opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} teststorage.RaftBackendSetup(&conf, &opts) opts.SetupFunc = nil cluster := vault.NewTestCluster(t, &conf, &opts) @@ -140,7 +141,7 @@ func TestRaft_Retry_Join(t *testing.T) { } leaderInfos := []*raft.LeaderJoinInfo{ - &raft.LeaderJoinInfo{ + { LeaderAPIAddr: leaderAPI, TLSConfig: leaderCore.TLSConfig, Retry: true, @@ -183,7 +184,7 @@ func TestRaft_Retry_Join(t *testing.T) { func TestRaft_Join(t *testing.T) { t.Parallel() var conf vault.CoreConfig - var opts = vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} + opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} teststorage.RaftBackendSetup(&conf, &opts) opts.SetupFunc = nil cluster := vault.NewTestCluster(t, &conf, &opts) @@ -994,7 +995,7 @@ func BenchmarkRaft_SingleNode(b *testing.B) { func TestRaft_Join_InitStatus(t *testing.T) { t.Parallel() var conf vault.CoreConfig - var opts = vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} + opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} teststorage.RaftBackendSetup(&conf, &opts) opts.SetupFunc = nil cluster := vault.NewTestCluster(t, &conf, &opts) diff --git a/vault/external_tests/raftha/raft_ha_test.go b/vault/external_tests/raftha/raft_ha_test.go index 10cfb8ffe8..3a8066f5c6 100644 --- a/vault/external_tests/raftha/raft_ha_test.go +++ b/vault/external_tests/raftha/raft_ha_test.go @@ -55,7 +55,7 @@ func TestRaft_HA_NewCluster(t *testing.T) { func testRaftHANewCluster(t *testing.T, bundler teststorage.PhysicalBackendBundler, addClientCerts bool) { var conf vault.CoreConfig - var opts = vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} + opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} teststorage.RaftHASetup(&conf, &opts, bundler) cluster := vault.NewTestCluster(t, &conf, &opts) diff --git a/vault/external_tests/response/allowed_response_headers_test.go b/vault/external_tests/response/allowed_response_headers_test.go index 2755b620a5..3232b27bf0 100644 --- a/vault/external_tests/response/allowed_response_headers_test.go +++ b/vault/external_tests/response/allowed_response_headers_test.go @@ -26,7 +26,7 @@ func TestIdentityStore_EntityDisabled(t *testing.T) { logical.ReadOperation: func(context.Context, *logical.Request, *framework.FieldData) (*logical.Response, error) { return &logical.Response{ Headers: map[string][]string{ - 
"www-authenticate": []string{"Negotiate"}, + "www-authenticate": {"Negotiate"}, }, }, logical.CodedError(401, "authentication required") }, @@ -39,7 +39,7 @@ func TestIdentityStore_EntityDisabled(t *testing.T) { return &logical.Response{ Auth: &logical.Auth{}, Headers: map[string][]string{ - "www-authenticate": []string{"Negotiate"}, + "www-authenticate": {"Negotiate"}, }, }, nil }, diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index 1ead7530b6..97d1929b6b 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -14,7 +14,6 @@ import ( type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) func testVariousBackends(t *testing.T, tf testFunc, basePort int, includeRaft bool) { - logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name()) t.Run("inmem", func(t *testing.T) { diff --git a/vault/external_tests/sealmigration/testshared.go b/vault/external_tests/sealmigration/testshared.go index 2d84b150e8..147168565a 100644 --- a/vault/external_tests/sealmigration/testshared.go +++ b/vault/external_tests/sealmigration/testshared.go @@ -33,7 +33,6 @@ const ( ) func ParamTestSealMigrationTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) { - // Create the transit server. tss := sealhelper.NewTransitSealServer(t, 0) defer func() { @@ -65,7 +64,6 @@ func ParamTestSealMigrationTransitToShamir_Pre14(t *testing.T, logger hclog.Logg } func ParamTestSealMigrationShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) { - // Initialize the backend using shamir cluster, _ := initializeShamir(t, logger, storage, basePort) rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys @@ -194,10 +192,10 @@ func ParamTestSealMigration_TransitToTransit(t *testing.T, logger hclog.Logger, func migrateFromTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, sealFunc func() vault.Seal, rootToken string, recoveryKeys [][]byte) { - var baseClusterPort = basePort + 10 + baseClusterPort := basePort + 10 var conf vault.CoreConfig - var opts = vault.TestClusterOptions{ + opts := vault.TestClusterOptions{ Logger: logger.Named("migrateFromTransitToShamir"), HandlerFunc: http.Handler, NumCores: numTestCores, @@ -267,12 +265,12 @@ func migrateFromTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage } func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte) func() vault.Seal { - var baseClusterPort = basePort + 10 + baseClusterPort := basePort + 10 - var conf = vault.CoreConfig{ + conf := vault.CoreConfig{ DisableAutopilot: true, } - var opts = vault.TestClusterOptions{ + opts := vault.TestClusterOptions{ Logger: logger.Named("migrateFromShamirToTransit"), HandlerFunc: http.Handler, NumCores: numTestCores, @@ -562,13 +560,13 @@ func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, s func initializeShamir(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) (*vault.TestCluster, *vault.TestClusterOptions) { t.Helper() - var baseClusterPort = basePort + 10 + baseClusterPort := basePort + 10 // Start the 
cluster - var conf = vault.CoreConfig{ + conf := vault.CoreConfig{ DisableAutopilot: true, } - var opts = vault.TestClusterOptions{ + opts := vault.TestClusterOptions{ Logger: logger.Named("initializeShamir"), HandlerFunc: http.Handler, NumCores: numTestCores, @@ -615,13 +613,13 @@ func initializeShamir(t *testing.T, logger hclog.Logger, storage teststorage.Reu // runShamir uses a pre-populated backend storage with Shamir. func runShamir(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, rootToken string, barrierKeys [][]byte) { t.Helper() - var baseClusterPort = basePort + 10 + baseClusterPort := basePort + 10 // Start the cluster - var conf = vault.CoreConfig{ + conf := vault.CoreConfig{ DisableAutopilot: true, } - var opts = vault.TestClusterOptions{ + opts := vault.TestClusterOptions{ Logger: logger.Named("runShamir"), HandlerFunc: http.Handler, NumCores: numTestCores, @@ -686,13 +684,13 @@ func InitializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.Re tss *sealhelper.TransitSealServer, sealKeyName string) (*vault.TestCluster, *vault.TestClusterOptions) { t.Helper() - var baseClusterPort = basePort + 10 + baseClusterPort := basePort + 10 // Start the cluster - var conf = vault.CoreConfig{ + conf := vault.CoreConfig{ DisableAutopilot: true, } - var opts = vault.TestClusterOptions{ + opts := vault.TestClusterOptions{ Logger: logger.Named("initializeTransit"), HandlerFunc: http.Handler, NumCores: numTestCores, @@ -741,13 +739,13 @@ func InitializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.Re func runAutoseal(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, rootToken string, sealFunc func() vault.Seal) { - var baseClusterPort = basePort + 10 + baseClusterPort := basePort + 10 // Start the cluster - var conf = vault.CoreConfig{ + conf := vault.CoreConfig{ DisableAutopilot: true, } - var opts = vault.TestClusterOptions{ + opts := vault.TestClusterOptions{ Logger: logger.Named("runTransit"), HandlerFunc: http.Handler, NumCores: numTestCores, @@ -823,7 +821,7 @@ func joinRaftFollowers(t *testing.T, cluster *vault.TestCluster, useStoredKeys b vault.TestWaitActive(t, leader.Core) leaderInfos := []*raft.LeaderJoinInfo{ - &raft.LeaderJoinInfo{ + { LeaderAPIAddr: leader.Client.Address(), TLSConfig: leader.TLSConfig, }, diff --git a/vault/external_tests/token/token_test.go b/vault/external_tests/token/token_test.go index 7766095155..3b8f638a05 100644 --- a/vault/external_tests/token/token_test.go +++ b/vault/external_tests/token/token_test.go @@ -353,7 +353,7 @@ func TestTokenStore_IdentityPolicies(t *testing.T) { if err != nil { t.Fatal(err) } - //t.Logf("%#v", *secret) + // t.Logf("%#v", *secret) var resp logical.Response if err := jsonutil.DecodeJSON([]byte(secret.Data["value"].(string)), &resp); err != nil { t.Fatal(err) diff --git a/vault/ha.go b/vault/ha.go index 375112ee3f..d81d18ab72 100644 --- a/vault/ha.go +++ b/vault/ha.go @@ -401,7 +401,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { // active. 
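// Not part of the patch: a hypothetical sketch of the rewrite applied through
// the sealmigration and ha.go hunks here, where an untyped in-function
// declaration such as var baseClusterPort = basePort + 10 becomes the short
// form baseClusterPort := basePort + 10. The two forms are equivalent.
package main

import "fmt"

func main() {
	basePort := 51000 // hypothetical port, not taken from the tests
	// Before: var baseClusterPort = basePort + 10
	// After:
	baseClusterPort := basePort + 10
	fmt.Println(baseClusterPort)
}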
func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stopCh chan struct{}) { var manualStepDown bool - var firstIteration = true + firstIteration := true for { // Check for a shutdown select { @@ -728,7 +728,6 @@ func (c *Core) periodicLeaderRefresh(newLeaderCh chan func(), stopCh chan struct default: c.logger.Debug("new leader found, but still processing previous leader change") } - } atomic.AddInt32(lopCount, -1) }() diff --git a/vault/ha_test.go b/vault/ha_test.go index 9e753b2c56..03208b80d1 100644 --- a/vault/ha_test.go +++ b/vault/ha_test.go @@ -17,15 +17,15 @@ import ( func TestGrabLockOrStop(t *testing.T) { // Stop the test early if we deadlock. const ( - workers = 100 + workers = 100 testDuration = time.Second - testTimeout = 10*testDuration + testTimeout = 10 * testDuration ) done := make(chan struct{}) defer close(done) var lockCount int64 go func() { - select{ + select { case <-done: case <-time.After(testTimeout): panic(fmt.Sprintf("deadlock after %d lock count", @@ -56,13 +56,13 @@ func TestGrabLockOrStop(t *testing.T) { go func() { defer closerWg.Done() // Close the stop channel half the time. - if rand.Int() % 2 == 0 { + if rand.Int()%2 == 0 { close(stop) } }() // Half the goroutines lock/unlock and the other half rlock/runlock. - if g % 2 == 0 { + if g%2 == 0 { if !grabLockOrStop(lock.Lock, lock.Unlock, stop) { lock.Unlock() } @@ -81,4 +81,4 @@ func TestGrabLockOrStop(t *testing.T) { }() } workerWg.Wait() -} \ No newline at end of file +} diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go index 50f4c5b33c..70dd3c507c 100644 --- a/vault/identity_store_entities.go +++ b/vault/identity_store_entities.go @@ -554,7 +554,7 @@ func (i *IdentityStore) handleEntityBatchDelete() framework.OperationFunc { for _, bucket := range byBucket { ids := make([]string, len(bucket)) i := 0 - for id, _ := range bucket { + for id := range bucket { ids[i] = id i++ } diff --git a/vault/identity_store_entities_test.go b/vault/identity_store_entities_test.go index 59d7cad554..d3e725a545 100644 --- a/vault/identity_store_entities_test.go +++ b/vault/identity_store_entities_test.go @@ -883,7 +883,6 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { if entityFetched != nil { t.Fatalf("bad: entity; expected: nil, actual: %#v\n", entityFetched) } - } func TestIdentityStore_EntityCRUD(t *testing.T) { diff --git a/vault/identity_store_oidc.go b/vault/identity_store_oidc.go index a48cdd5819..7d9e098f06 100644 --- a/vault/identity_store_oidc.go +++ b/vault/identity_store_oidc.go @@ -103,16 +103,18 @@ const ( roleConfigPath = oidcTokensPrefix + "roles/" ) -var requiredClaims = []string{"iat", "aud", "exp", "iss", "sub", "namespace"} -var supportedAlgs = []string{ - string(jose.RS256), - string(jose.RS384), - string(jose.RS512), - string(jose.ES256), - string(jose.ES384), - string(jose.ES512), - string(jose.EdDSA), -} +var ( + requiredClaims = []string{"iat", "aud", "exp", "iss", "sub", "namespace"} + supportedAlgs = []string{ + string(jose.RS256), + string(jose.RS384), + string(jose.RS512), + string(jose.ES256), + string(jose.ES384), + string(jose.ES512), + string(jose.EdDSA), + } +) // pseudo-namespace for cache items that don't belong to any real namespace. 
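// Not part of the patch: a hypothetical sketch of the declaration grouping
// seen just above in identity_store_oidc.go (and in core_util.go earlier),
// where adjacent top-level var or type declarations are merged into a single
// parenthesized block. The names below are illustrative only.
package main

import "fmt"

// Before:
//   var requiredFields = []string{"iat", "aud"}
//   var supportedModes = []string{"RS256"}
// After (one grouped block):
var (
	requiredFields = []string{"iat", "aud"}
	supportedModes = []string{"RS256"}
)

func main() {
	fmt.Println(requiredFields, supportedModes)
}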
var noNamespace = &namespace.Namespace{ID: "__NO_NAMESPACE"} @@ -160,7 +162,7 @@ func oidcPaths(i *IdentityStore) []*framework.Path { Default: "RS256", }, - "allowed_client_ids": &framework.FieldSchema{ + "allowed_client_ids": { Type: framework.TypeCommaStringSlice, Description: "Comma separated string or array of role client ids allowed to use this key for signing. If empty no roles are allowed. If \"*\" all roles are allowed.", }, @@ -810,7 +812,6 @@ func (tok *idToken) generatePayload(logger hclog.Logger, template string, entity Groups: identity.ToSDKGroups(groups), // namespace? }) - if err != nil { logger.Warn("error populating OIDC token template", "template", template, "error", err) } @@ -915,7 +916,6 @@ func (i *IdentityStore) pathOIDCCreateUpdateRole(ctx context.Context, req *logic Groups: make([]*logical.Group, 0), // namespace? }) - if err != nil { return logical.ErrorResponse("error parsing template: %s", err.Error()), nil } @@ -1482,7 +1482,6 @@ func (i *IdentityStore) expireOIDCPublicKeys(ctx context.Context, s logical.Stor if err := s.Put(ctx, entry); err != nil { i.Logger().Error("error saving key", "key", key.name, "error", err) - } didUpdate = true } diff --git a/vault/identity_store_oidc_test.go b/vault/identity_store_oidc_test.go index 5fc5ba0ff0..6662de179f 100644 --- a/vault/identity_store_oidc_test.go +++ b/vault/identity_store_oidc_test.go @@ -253,7 +253,7 @@ func TestOIDC_Path_OIDCKeyKey(t *testing.T) { Storage: storage, }) expectSuccess(t, resp, err) - //fmt.Printf("resp is:\n%#v", resp) + // fmt.Printf("resp is:\n%#v", resp) // Delete test-key -- should fail because test-role depends on test-key resp, err = c.identityStore.HandleRequest(ctx, &logical.Request{ @@ -560,7 +560,7 @@ func TestOIDC_PeriodicFunc(t *testing.T) { cyclePeriod := 2 * time.Second - var testSets = []struct { + testSets := []struct { namedKey *namedKey testCases []struct { cycle int @@ -718,7 +718,7 @@ func TestOIDC_pathOIDCKeyExistenceCheck(t *testing.T) { &framework.FieldData{ Raw: map[string]interface{}{"name": keyName}, Schema: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, }, }, @@ -747,7 +747,7 @@ func TestOIDC_pathOIDCKeyExistenceCheck(t *testing.T) { &framework.FieldData{ Raw: map[string]interface{}{"name": keyName}, Schema: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, }, }, @@ -778,7 +778,7 @@ func TestOIDC_pathOIDCRoleExistenceCheck(t *testing.T) { &framework.FieldData{ Raw: map[string]interface{}{"name": roleName}, Schema: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, }, }, @@ -807,7 +807,7 @@ func TestOIDC_pathOIDCRoleExistenceCheck(t *testing.T) { &framework.FieldData{ Raw: map[string]interface{}{"name": roleName}, Schema: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, }, }, @@ -1012,9 +1012,9 @@ func TestOIDC_isTargetNamespacedKey(t *testing.T) { func TestOIDC_Flush(t *testing.T) { c := newOIDCCache() ns := []*namespace.Namespace{ - noNamespace, //ns[0] is nilNamespace - &namespace.Namespace{ID: "ns1"}, - &namespace.Namespace{ID: "ns2"}, + noNamespace, // ns[0] is nilNamespace + {ID: "ns1"}, + {ID: "ns2"}, } // populateNs populates cache by ns with some data diff --git a/vault/identity_store_schema.go b/vault/identity_store_schema.go index 6ef28d0dda..bd15bc3b69 100644 --- a/vault/identity_store_schema.go +++ 
b/vault/identity_store_schema.go @@ -40,14 +40,14 @@ func aliasesTableSchema(lowerCaseName bool) *memdb.TableSchema { return &memdb.TableSchema{ Name: entityAliasesTable, Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", Unique: true, Indexer: &memdb.StringFieldIndex{ Field: "ID", }, }, - "factors": &memdb.IndexSchema{ + "factors": { Name: "factors", Unique: true, Indexer: &memdb.CompoundIndex{ @@ -62,7 +62,7 @@ func aliasesTableSchema(lowerCaseName bool) *memdb.TableSchema { }, }, }, - "namespace_id": &memdb.IndexSchema{ + "namespace_id": { Name: "namespace_id", Indexer: &memdb.StringFieldIndex{ Field: "NamespaceID", @@ -76,14 +76,14 @@ func entitiesTableSchema(lowerCaseName bool) *memdb.TableSchema { return &memdb.TableSchema{ Name: entitiesTable, Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", Unique: true, Indexer: &memdb.StringFieldIndex{ Field: "ID", }, }, - "name": &memdb.IndexSchema{ + "name": { Name: "name", Unique: true, Indexer: &memdb.CompoundIndex{ @@ -98,7 +98,7 @@ func entitiesTableSchema(lowerCaseName bool) *memdb.TableSchema { }, }, }, - "merged_entity_ids": &memdb.IndexSchema{ + "merged_entity_ids": { Name: "merged_entity_ids", Unique: true, AllowMissing: true, @@ -106,13 +106,13 @@ func entitiesTableSchema(lowerCaseName bool) *memdb.TableSchema { Field: "MergedEntityIDs", }, }, - "bucket_key": &memdb.IndexSchema{ + "bucket_key": { Name: "bucket_key", Indexer: &memdb.StringFieldIndex{ Field: "BucketKey", }, }, - "namespace_id": &memdb.IndexSchema{ + "namespace_id": { Name: "namespace_id", Indexer: &memdb.StringFieldIndex{ Field: "NamespaceID", @@ -162,13 +162,13 @@ func groupsTableSchema(lowerCaseName bool) *memdb.TableSchema { Field: "ParentGroupIDs", }, }, - "bucket_key": &memdb.IndexSchema{ + "bucket_key": { Name: "bucket_key", Indexer: &memdb.StringFieldIndex{ Field: "BucketKey", }, }, - "namespace_id": &memdb.IndexSchema{ + "namespace_id": { Name: "namespace_id", Indexer: &memdb.StringFieldIndex{ Field: "NamespaceID", @@ -182,14 +182,14 @@ func groupAliasesTableSchema(lowerCaseName bool) *memdb.TableSchema { return &memdb.TableSchema{ Name: groupAliasesTable, Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ + "id": { Name: "id", Unique: true, Indexer: &memdb.StringFieldIndex{ Field: "ID", }, }, - "factors": &memdb.IndexSchema{ + "factors": { Name: "factors", Unique: true, Indexer: &memdb.CompoundIndex{ @@ -204,7 +204,7 @@ func groupAliasesTableSchema(lowerCaseName bool) *memdb.TableSchema { }, }, }, - "namespace_id": &memdb.IndexSchema{ + "namespace_id": { Name: "namespace_id", Indexer: &memdb.StringFieldIndex{ Field: "NamespaceID", diff --git a/vault/identity_store_structs.go b/vault/identity_store_structs.go index c0f1f97aba..00306e437e 100644 --- a/vault/identity_store_structs.go +++ b/vault/identity_store_structs.go @@ -17,10 +17,8 @@ const ( entityPrefix = "entity/" ) -var ( - // metaKeyFormatRegEx checks if a metadata key string is valid - metaKeyFormatRegEx = regexp.MustCompile(`^[a-zA-Z0-9=/+_-]+$`).MatchString -) +// metaKeyFormatRegEx checks if a metadata key string is valid +var metaKeyFormatRegEx = regexp.MustCompile(`^[a-zA-Z0-9=/+_-]+$`).MatchString const ( // The meta key prefix reserved for Vault's internal use diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go index 3d45fec8b9..82ad82cab5 100644 --- a/vault/identity_store_test.go +++ b/vault/identity_store_test.go @@ -653,7 +653,6 @@ func expectSingleCount(t *testing.T, sink 
*metrics.InmemSink, keyPrefix string) if counter.Sum != 1.0 { t.Errorf("Counter sum %v is not 1.", counter.Sum) } - } func TestIdentityStore_NewEntityCounter(t *testing.T) { diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index fdcfd90719..1309dc9318 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -22,9 +22,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -var ( - errDuplicateIdentityName = errors.New("duplicate identity name") -) +var errDuplicateIdentityName = errors.New("duplicate identity name") func (c *Core) SetLoadCaseSensitiveIdentityStore(caseSensitive bool) { c.loadCaseSensitiveIdentityStore = caseSensitive @@ -1894,7 +1892,6 @@ func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(ctx context.Co } refreshFunc := func(dryRun bool) (bool, []*logical.Alias, error) { - if !dryRun { i.groupLock.Lock() defer i.groupLock.Unlock() diff --git a/vault/init.go b/vault/init.go index 5fd69876a7..3afde6b81e 100644 --- a/vault/init.go +++ b/vault/init.go @@ -144,7 +144,7 @@ func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) { // If we have PGP keys, perform the encryption if len(sc.PGPKeys) > 0 { hexEncodedShares := make([][]byte, len(unsealKeys)) - for i, _ := range unsealKeys { + for i := range unsealKeys { hexEncodedShares[i] = []byte(hex.EncodeToString(unsealKeys[i])) } _, encryptedShares, err := pgpkeys.EncryptShares(hexEncodedShares, sc.PGPKeys) diff --git a/vault/logical_passthrough.go b/vault/logical_passthrough.go index aefdf19ac4..23d854dc27 100644 --- a/vault/logical_passthrough.go +++ b/vault/logical_passthrough.go @@ -62,7 +62,7 @@ func LeaseSwitchedPassthroughBackend(ctx context.Context, conf *logical.BackendC } b.Backend.Secrets = []*framework.Secret{ - &framework.Secret{ + { Type: "kv", Renew: b.handleRead, diff --git a/vault/logical_raw.go b/vault/logical_raw.go index 3c73fc303f..f845c9c487 100644 --- a/vault/logical_raw.go +++ b/vault/logical_raw.go @@ -11,15 +11,13 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -var ( - // protectedPaths cannot be accessed via the raw APIs. - // This is both for security and to prevent disrupting Vault. - protectedPaths = []string{ - keyringPath, - // Changing the cluster info path can change the cluster ID which can be disruptive - coreLocalClusterInfoPath, - } -) +// protectedPaths cannot be accessed via the raw APIs. +// This is both for security and to prevent disrupting Vault. 
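// Not part of the patch: a hypothetical sketch of the change in this
// logical_raw.go hunk (also seen in audit.go and identity_store_util.go):
// a parenthesized var block holding a single declaration is flattened to a
// plain var statement, with its doc comment kept directly above it.
package main

import (
	"errors"
	"fmt"
)

// Before:
//   var (
//       // errNotFound is returned when a lookup misses.
//       errNotFound = errors.New("not found")
//   )
// After:

// errNotFound is returned when a lookup misses.
var errNotFound = errors.New("not found")

func main() {
	fmt.Println(errNotFound)
}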
+var protectedPaths = []string{ + keyringPath, + // Changing the cluster info path can change the cluster ID which can be disruptive + coreLocalClusterInfoPath, +} type RawBackend struct { *framework.Backend @@ -179,14 +177,14 @@ func (b *RawBackend) handleRawList(ctx context.Context, req *logical.Request, da func rawPaths(prefix string, r *RawBackend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: prefix + "(raw/?$|raw/(?P.+))", Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeString, }, - "value": &framework.FieldSchema{ + "value": { Type: framework.TypeString, }, }, diff --git a/vault/logical_system.go b/vault/logical_system.go index 5d7434242f..cd13bc455f 100644 --- a/vault/logical_system.go +++ b/vault/logical_system.go @@ -42,8 +42,10 @@ import ( "github.com/mitchellh/mapstructure" ) -const maxBytes = 128 * 1024 -const globalScope = "global" +const ( + maxBytes = 128 * 1024 + globalScope = "global" +) func systemBackendMemDBSchema() *memdb.DBSchema { systemSchema := &memdb.DBSchema{ @@ -3605,7 +3607,6 @@ func (b *SystemBackend) pathInternalUIResultantACL(ctx context.Context, req *log } func (b *SystemBackend) pathInternalOpenAPI(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Limit output to authorized paths resp, err := b.pathInternalUIMountsRead(ctx, req, d) if err != nil { @@ -3917,7 +3918,6 @@ func (b *SystemBackend) rotateBarrierKey(ctx context.Context) error { } return nil - } func sanitizePath(path string) string { diff --git a/vault/logical_system_activity.go b/vault/logical_system_activity.go index ece572d300..ca1a8fa5d9 100644 --- a/vault/logical_system_activity.go +++ b/vault/logical_system_activity.go @@ -17,11 +17,11 @@ func (b *SystemBackend) activityQueryPath() *framework.Path { return &framework.Path{ Pattern: "internal/counters/activity$", Fields: map[string]*framework.FieldSchema{ - "start_time": &framework.FieldSchema{ + "start_time": { Type: framework.TypeTime, Description: "Start of query interval", }, - "end_time": &framework.FieldSchema{ + "end_time": { Type: framework.TypeTime, Description: "End of query interval", }, diff --git a/vault/logical_system_paths.go b/vault/logical_system_paths.go index 8b9f87e951..d990253fce 100644 --- a/vault/logical_system_paths.go +++ b/vault/logical_system_paths.go @@ -13,15 +13,15 @@ func (b *SystemBackend) configPaths() []*framework.Path { Pattern: "config/cors$", Fields: map[string]*framework.FieldSchema{ - "enable": &framework.FieldSchema{ + "enable": { Type: framework.TypeBool, Description: "Enables or disables CORS headers on requests.", }, - "allowed_origins": &framework.FieldSchema{ + "allowed_origins": { Type: framework.TypeCommaStringSlice, Description: "A comma-separated string or array of strings indicating origins that may make cross-origin requests.", }, - "allowed_headers": &framework.FieldSchema{ + "allowed_headers": { Type: framework.TypeCommaStringSlice, Description: "A comma-separated string or array of strings indicating headers that are allowed on cross-origin requests.", }, @@ -63,15 +63,15 @@ func (b *SystemBackend) configPaths() []*framework.Path { Pattern: "config/ui/headers/" + framework.GenericNameRegex("header"), Fields: map[string]*framework.FieldSchema{ - "header": &framework.FieldSchema{ + "header": { Type: framework.TypeString, Description: "The name of the header.", }, - "values": &framework.FieldSchema{ + "values": { Type: framework.TypeStringSlice, 
Description: "The values to set the header.", }, - "multivalue": &framework.FieldSchema{ + "multivalue": { Type: framework.TypeBool, Description: "Returns multiple values if true", }, @@ -113,7 +113,7 @@ func (b *SystemBackend) configPaths() []*framework.Path { { Pattern: "generate-root(/attempt)?$", Fields: map[string]*framework.FieldSchema{ - "pgp_key": &framework.FieldSchema{ + "pgp_key": { Type: framework.TypeString, Description: "Specifies a base64-encoded PGP public key.", }, @@ -137,11 +137,11 @@ func (b *SystemBackend) configPaths() []*framework.Path { { Pattern: "generate-root/update$", Fields: map[string]*framework.FieldSchema{ - "key": &framework.FieldSchema{ + "key": { Type: framework.TypeString, Description: "Specifies a single master key share.", }, - "nonce": &framework.FieldSchema{ + "nonce": { Type: framework.TypeString, Description: "Specifies the nonce of the attempt.", }, @@ -159,35 +159,35 @@ func (b *SystemBackend) configPaths() []*framework.Path { { Pattern: "health$", Fields: map[string]*framework.FieldSchema{ - "standbyok": &framework.FieldSchema{ + "standbyok": { Type: framework.TypeBool, Description: "Specifies if being a standby should still return the active status code.", }, - "perfstandbyok": &framework.FieldSchema{ + "perfstandbyok": { Type: framework.TypeBool, Description: "Specifies if being a performance standby should still return the active status code.", }, - "activecode": &framework.FieldSchema{ + "activecode": { Type: framework.TypeInt, Description: "Specifies the status code for an active node.", }, - "standbycode": &framework.FieldSchema{ + "standbycode": { Type: framework.TypeInt, Description: "Specifies the status code for a standby node.", }, - "drsecondarycode": &framework.FieldSchema{ + "drsecondarycode": { Type: framework.TypeInt, Description: "Specifies the status code for a DR secondary node.", }, - "performancestandbycode": &framework.FieldSchema{ + "performancestandbycode": { Type: framework.TypeInt, Description: "Specifies the status code for a performance standby node.", }, - "sealedcode": &framework.FieldSchema{ + "sealedcode": { Type: framework.TypeInt, Description: "Specifies the status code for a sealed node.", }, - "uninitcode": &framework.FieldSchema{ + "uninitcode": { Type: framework.TypeInt, Description: "Specifies the status code for an uninitialized node.", }, @@ -212,35 +212,35 @@ func (b *SystemBackend) configPaths() []*framework.Path { { Pattern: "init$", Fields: map[string]*framework.FieldSchema{ - "pgp_keys": &framework.FieldSchema{ + "pgp_keys": { Type: framework.TypeCommaStringSlice, Description: "Specifies an array of PGP public keys used to encrypt the output unseal keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. The size of this array must be the same as `secret_shares`.", }, - "root_token_pgp_key": &framework.FieldSchema{ + "root_token_pgp_key": { Type: framework.TypeString, Description: "Specifies a PGP public key used to encrypt the initial root token. The key must be base64-encoded from its original binary representation.", }, - "secret_shares": &framework.FieldSchema{ + "secret_shares": { Type: framework.TypeInt, Description: "Specifies the number of shares to split the master key into.", }, - "secret_threshold": &framework.FieldSchema{ + "secret_threshold": { Type: framework.TypeInt, Description: "Specifies the number of shares required to reconstruct the master key. This must be less than or equal secret_shares. 
If using Vault HSM with auto-unsealing, this value must be the same as `secret_shares`.", }, - "stored_shares": &framework.FieldSchema{ + "stored_shares": { Type: framework.TypeInt, Description: "Specifies the number of shares that should be encrypted by the HSM and stored for auto-unsealing. Currently must be the same as `secret_shares`.", }, - "recovery_shares": &framework.FieldSchema{ + "recovery_shares": { Type: framework.TypeInt, Description: "Specifies the number of shares to split the recovery key into.", }, - "recovery_threshold": &framework.FieldSchema{ + "recovery_threshold": { Type: framework.TypeInt, Description: " Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to `recovery_shares`.", }, - "recovery_pgp_keys": &framework.FieldSchema{ + "recovery_pgp_keys": { Type: framework.TypeCommaStringSlice, Description: "Specifies an array of PGP public keys used to encrypt the output recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. The size of this array must be the same as `recovery_shares`.", }, @@ -280,23 +280,23 @@ func (b *SystemBackend) rekeyPaths() []*framework.Path { Pattern: "rekey/init", Fields: map[string]*framework.FieldSchema{ - "secret_shares": &framework.FieldSchema{ + "secret_shares": { Type: framework.TypeInt, Description: "Specifies the number of shares to split the master key into.", }, - "secret_threshold": &framework.FieldSchema{ + "secret_threshold": { Type: framework.TypeInt, Description: "Specifies the number of shares required to reconstruct the master key. This must be less than or equal secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as secret_shares.", }, - "pgp_keys": &framework.FieldSchema{ + "pgp_keys": { Type: framework.TypeCommaStringSlice, Description: "Specifies an array of PGP public keys used to encrypt the output unseal keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. 
The size of this array must be the same as secret_shares.", }, - "backup": &framework.FieldSchema{ + "backup": { Type: framework.TypeBool, Description: "Specifies if using PGP-encrypted keys, whether Vault should also store a plaintext backup of the PGP-encrypted keys.", }, - "require_verification": &framework.FieldSchema{ + "require_verification": { Type: framework.TypeBool, Description: "Turns on verification functionality", }, @@ -353,11 +353,11 @@ func (b *SystemBackend) rekeyPaths() []*framework.Path { Pattern: "rekey/update", Fields: map[string]*framework.FieldSchema{ - "key": &framework.FieldSchema{ + "key": { Type: framework.TypeString, Description: "Specifies a single master key share.", }, - "nonce": &framework.FieldSchema{ + "nonce": { Type: framework.TypeString, Description: "Specifies the nonce of the rekey attempt.", }, @@ -373,11 +373,11 @@ func (b *SystemBackend) rekeyPaths() []*framework.Path { Pattern: "rekey/verify", Fields: map[string]*framework.FieldSchema{ - "key": &framework.FieldSchema{ + "key": { Type: framework.TypeString, Description: "Specifies a single master share key from the new set of shares.", }, - "nonce": &framework.FieldSchema{ + "nonce": { Type: framework.TypeString, Description: "Specifies the nonce of the rekey verification operation.", }, @@ -411,11 +411,11 @@ func (b *SystemBackend) rekeyPaths() []*framework.Path { { Pattern: "unseal$", Fields: map[string]*framework.FieldSchema{ - "key": &framework.FieldSchema{ + "key": { Type: framework.TypeString, Description: "Specifies a single master key share. This is required unless reset is true.", }, - "reset": &framework.FieldSchema{ + "reset": { Type: framework.TypeBool, Description: "Specifies if previously-provided unseal keys are discarded and the unseal process is reset.", }, @@ -468,12 +468,12 @@ func (b *SystemBackend) auditPaths() []*framework.Path { Pattern: "audit-hash/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["audit_path"][0]), }, - "input": &framework.FieldSchema{ + "input": { Type: framework.TypeString, }, }, @@ -504,23 +504,23 @@ func (b *SystemBackend) auditPaths() []*framework.Path { Pattern: "audit/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["audit_path"][0]), }, - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["audit_type"][0]), }, - "description": &framework.FieldSchema{ + "description": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["audit_desc"][0]), }, - "options": &framework.FieldSchema{ + "options": { Type: framework.TypeKVPairs, Description: strings.TrimSpace(sysHelp["audit_opts"][0]), }, - "local": &framework.FieldSchema{ + "local": { Type: framework.TypeBool, Default: false, Description: strings.TrimSpace(sysHelp["mount_local"][0]), @@ -546,10 +546,10 @@ func (b *SystemBackend) auditPaths() []*framework.Path { Pattern: "config/auditing/request-headers/(?P
.+)", Fields: map[string]*framework.FieldSchema{ - "header": &framework.FieldSchema{ + "header": { Type: framework.TypeString, }, - "hmac": &framework.FieldSchema{ + "hmac": { Type: framework.TypeBool, }, }, @@ -605,15 +605,15 @@ func (b *SystemBackend) sealPaths() []*framework.Path { { Pattern: "rotate/config$", Fields: map[string]*framework.FieldSchema{ - "enabled": &framework.FieldSchema{ + "enabled": { Type: framework.TypeBool, Description: strings.TrimSpace(sysHelp["rotation-enabled"][0]), }, - "max_operations": &framework.FieldSchema{ - Type: framework.TypeInt, //64? + "max_operations": { + Type: framework.TypeInt, // 64? Description: strings.TrimSpace(sysHelp["rotation-max-operations"][0]), }, - "interval": &framework.FieldSchema{ + "interval": { Type: framework.TypeDurationSecond, Description: strings.TrimSpace(sysHelp["rotation-interval"][0]), }, @@ -652,31 +652,31 @@ func (b *SystemBackend) pluginsCatalogCRUDPath() *framework.Path { Pattern: "plugins/catalog(/(?Pauth|database|secret))?/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["plugin-catalog_name"][0]), }, - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["plugin-catalog_type"][0]), }, - "sha256": &framework.FieldSchema{ + "sha256": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]), }, - "sha_256": &framework.FieldSchema{ + "sha_256": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]), }, - "command": &framework.FieldSchema{ + "command": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["plugin-catalog_command"][0]), }, - "args": &framework.FieldSchema{ + "args": { Type: framework.TypeStringSlice, Description: strings.TrimSpace(sysHelp["plugin-catalog_args"][0]), }, - "env": &framework.FieldSchema{ + "env": { Type: framework.TypeStringSlice, Description: strings.TrimSpace(sysHelp["plugin-catalog_env"][0]), }, @@ -708,7 +708,7 @@ func (b *SystemBackend) pluginsCatalogListPaths() []*framework.Path { Pattern: "plugins/catalog/(?Pauth|database|secret)/?$", Fields: map[string]*framework.FieldSchema{ - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["plugin-catalog_type"][0]), }, @@ -742,15 +742,15 @@ func (b *SystemBackend) pluginsReloadPath() *framework.Path { Pattern: "plugins/reload/backend$", Fields: map[string]*framework.FieldSchema{ - "plugin": &framework.FieldSchema{ + "plugin": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["plugin-backend-reload-plugin"][0]), }, - "mounts": &framework.FieldSchema{ + "mounts": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["plugin-backend-reload-mounts"][0]), }, - "scope": &framework.FieldSchema{ + "scope": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["plugin-backend-reload-scope"][0]), }, @@ -774,12 +774,12 @@ func (b *SystemBackend) toolsPaths() []*framework.Path { { Pattern: "tools/hash" + framework.OptionalParamRegex("urlalgorithm"), Fields: map[string]*framework.FieldSchema{ - "input": &framework.FieldSchema{ + "input": { Type: framework.TypeString, Description: "The base64-encoded input data", }, - "algorithm": &framework.FieldSchema{ + "algorithm": { Type: framework.TypeString, Default: "sha2-256", Description: `Algorithm to use (POST body parameter). 
Valid values are: @@ -792,12 +792,12 @@ func (b *SystemBackend) toolsPaths() []*framework.Path { Defaults to "sha2-256".`, }, - "urlalgorithm": &framework.FieldSchema{ + "urlalgorithm": { Type: framework.TypeString, Description: `Algorithm to use (POST URL parameter)`, }, - "format": &framework.FieldSchema{ + "format": { Type: framework.TypeString, Default: "hex", Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "hex".`, @@ -815,18 +815,18 @@ func (b *SystemBackend) toolsPaths() []*framework.Path { { Pattern: "tools/random" + framework.OptionalParamRegex("urlbytes"), Fields: map[string]*framework.FieldSchema{ - "urlbytes": &framework.FieldSchema{ + "urlbytes": { Type: framework.TypeString, Description: "The number of bytes to generate (POST URL parameter)", }, - "bytes": &framework.FieldSchema{ + "bytes": { Type: framework.TypeInt, Default: 32, Description: "The number of bytes to generate (POST body parameter). Defaults to 32 (256 bits).", }, - "format": &framework.FieldSchema{ + "format": { Type: framework.TypeString, Default: "base64", Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`, @@ -848,7 +848,7 @@ func (b *SystemBackend) internalPaths() []*framework.Path { { Pattern: "internal/specs/openapi", Fields: map[string]*framework.FieldSchema{ - "context": &framework.FieldSchema{ + "context": { Type: framework.TypeString, Description: "Context string appended to every operationId", }, @@ -892,7 +892,7 @@ func (b *SystemBackend) internalPaths() []*framework.Path { { Pattern: "internal/ui/mounts/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeString, Description: "The path of the mount.", }, @@ -970,16 +970,16 @@ func (b *SystemBackend) capabilitiesPaths() []*framework.Path { Pattern: "capabilities-accessor$", Fields: map[string]*framework.FieldSchema{ - "accessor": &framework.FieldSchema{ + "accessor": { Type: framework.TypeString, Description: "Accessor of the token for which capabilities are being queried.", }, - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeCommaStringSlice, Description: "Use 'paths' instead.", Deprecated: true, }, - "paths": &framework.FieldSchema{ + "paths": { Type: framework.TypeCommaStringSlice, Description: "Paths on which capabilities are being queried.", }, @@ -997,16 +997,16 @@ func (b *SystemBackend) capabilitiesPaths() []*framework.Path { Pattern: "capabilities$", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token for which capabilities are being queried.", }, - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeCommaStringSlice, Description: "Use 'paths' instead.", Deprecated: true, }, - "paths": &framework.FieldSchema{ + "paths": { Type: framework.TypeCommaStringSlice, Description: "Paths on which capabilities are being queried.", }, @@ -1024,16 +1024,16 @@ func (b *SystemBackend) capabilitiesPaths() []*framework.Path { Pattern: "capabilities-self$", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token for which capabilities are being queried.", }, - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeCommaStringSlice, Description: "Use 'paths' instead.", Deprecated: true, }, - "paths": &framework.FieldSchema{ + "paths": { Type: framework.TypeCommaStringSlice, Description: "Paths on which capabilities are being 
queried.", }, @@ -1055,7 +1055,7 @@ func (b *SystemBackend) leasePaths() []*framework.Path { Pattern: "leases/lookup/(?P.+?)?", Fields: map[string]*framework.FieldSchema{ - "prefix": &framework.FieldSchema{ + "prefix": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["leases-list-prefix"][0]), }, @@ -1076,7 +1076,7 @@ func (b *SystemBackend) leasePaths() []*framework.Path { Pattern: "leases/lookup", Fields: map[string]*framework.FieldSchema{ - "lease_id": &framework.FieldSchema{ + "lease_id": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["lease_id"][0]), }, @@ -1097,15 +1097,15 @@ func (b *SystemBackend) leasePaths() []*framework.Path { Pattern: "(leases/)?renew" + framework.OptionalParamRegex("url_lease_id"), Fields: map[string]*framework.FieldSchema{ - "url_lease_id": &framework.FieldSchema{ + "url_lease_id": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["lease_id"][0]), }, - "lease_id": &framework.FieldSchema{ + "lease_id": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["lease_id"][0]), }, - "increment": &framework.FieldSchema{ + "increment": { Type: framework.TypeDurationSecond, Description: strings.TrimSpace(sysHelp["increment"][0]), }, @@ -1126,15 +1126,15 @@ func (b *SystemBackend) leasePaths() []*framework.Path { Pattern: "(leases/)?revoke" + framework.OptionalParamRegex("url_lease_id"), Fields: map[string]*framework.FieldSchema{ - "url_lease_id": &framework.FieldSchema{ + "url_lease_id": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["lease_id"][0]), }, - "lease_id": &framework.FieldSchema{ + "lease_id": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["lease_id"][0]), }, - "sync": &framework.FieldSchema{ + "sync": { Type: framework.TypeBool, Default: true, Description: strings.TrimSpace(sysHelp["revoke-sync"][0]), @@ -1156,7 +1156,7 @@ func (b *SystemBackend) leasePaths() []*framework.Path { Pattern: "(leases/)?revoke-force/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "prefix": &framework.FieldSchema{ + "prefix": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["revoke-force-path"][0]), }, @@ -1178,11 +1178,11 @@ func (b *SystemBackend) leasePaths() []*framework.Path { Pattern: "(leases/)?revoke-prefix/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "prefix": &framework.FieldSchema{ + "prefix": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["revoke-prefix-path"][0]), }, - "sync": &framework.FieldSchema{ + "sync": { Type: framework.TypeBool, Default: true, Description: strings.TrimSpace(sysHelp["revoke-sync"][0]), @@ -1218,11 +1218,11 @@ func (b *SystemBackend) remountPath() *framework.Path { Pattern: "remount", Fields: map[string]*framework.FieldSchema{ - "from": &framework.FieldSchema{ + "from": { Type: framework.TypeString, Description: "The previous mount point.", }, - "to": &framework.FieldSchema{ + "to": { Type: framework.TypeString, Description: "The new mount point.", }, @@ -1241,7 +1241,7 @@ func (b *SystemBackend) metricsPath() *framework.Path { return &framework.Path{ Pattern: "metrics", Fields: map[string]*framework.FieldSchema{ - "format": &framework.FieldSchema{ + "format": { Type: framework.TypeString, Description: "Format to export metrics into. 
Currently accepts only \"prometheus\".", Query: true, @@ -1253,14 +1253,13 @@ func (b *SystemBackend) metricsPath() *framework.Path { HelpSynopsis: strings.TrimSpace(sysHelp["metrics"][0]), HelpDescription: strings.TrimSpace(sysHelp["metrics"][1]), } - } func (b *SystemBackend) monitorPath() *framework.Path { return &framework.Path{ Pattern: "monitor", Fields: map[string]*framework.FieldSchema{ - "log_level": &framework.FieldSchema{ + "log_level": { Type: framework.TypeString, Description: "Log level to view system logs at. Currently supported values are \"trace\", \"debug\", \"info\", \"warn\", \"error\".", Query: true, @@ -1272,7 +1271,6 @@ func (b *SystemBackend) monitorPath() *framework.Path { HelpSynopsis: strings.TrimSpace(sysHelp["monitor"][0]), HelpDescription: strings.TrimSpace(sysHelp["monitor"][1]), } - } func (b *SystemBackend) hostInfoPath() *framework.Path { @@ -1303,47 +1301,47 @@ func (b *SystemBackend) authPaths() []*framework.Path { { Pattern: "auth/(?P.+?)/tune$", Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["auth_tune"][0]), }, - "default_lease_ttl": &framework.FieldSchema{ + "default_lease_ttl": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]), }, - "max_lease_ttl": &framework.FieldSchema{ + "max_lease_ttl": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]), }, - "description": &framework.FieldSchema{ + "description": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["auth_desc"][0]), }, - "audit_non_hmac_request_keys": &framework.FieldSchema{ + "audit_non_hmac_request_keys": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_request_keys"][0]), }, - "audit_non_hmac_response_keys": &framework.FieldSchema{ + "audit_non_hmac_response_keys": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_response_keys"][0]), }, - "options": &framework.FieldSchema{ + "options": { Type: framework.TypeKVPairs, Description: strings.TrimSpace(sysHelp["tune_mount_options"][0]), }, - "listing_visibility": &framework.FieldSchema{ + "listing_visibility": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["listing_visibility"][0]), }, - "passthrough_request_headers": &framework.FieldSchema{ + "passthrough_request_headers": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["passthrough_request_headers"][0]), }, - "allowed_response_headers": &framework.FieldSchema{ + "allowed_response_headers": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["allowed_response_headers"][0]), }, - "token_type": &framework.FieldSchema{ + "token_type": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["token_type"][0]), }, @@ -1366,42 +1364,42 @@ func (b *SystemBackend) authPaths() []*framework.Path { { Pattern: "auth/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["auth_path"][0]), }, - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["auth_type"][0]), }, - "description": &framework.FieldSchema{ + "description": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["auth_desc"][0]), }, - "config": 
&framework.FieldSchema{ + "config": { Type: framework.TypeMap, Description: strings.TrimSpace(sysHelp["auth_config"][0]), }, - "local": &framework.FieldSchema{ + "local": { Type: framework.TypeBool, Default: false, Description: strings.TrimSpace(sysHelp["mount_local"][0]), }, - "seal_wrap": &framework.FieldSchema{ + "seal_wrap": { Type: framework.TypeBool, Default: false, Description: strings.TrimSpace(sysHelp["seal_wrap"][0]), }, - "external_entropy_access": &framework.FieldSchema{ + "external_entropy_access": { Type: framework.TypeBool, Default: false, Description: strings.TrimSpace(sysHelp["external_entropy_access"][0]), }, - "plugin_name": &framework.FieldSchema{ + "plugin_name": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["auth_plugin"][0]), }, - "options": &framework.FieldSchema{ + "options": { Type: framework.TypeKVPairs, Description: strings.TrimSpace(sysHelp["auth_options"][0]), }, @@ -1443,16 +1441,16 @@ func (b *SystemBackend) policyPaths() []*framework.Path { Pattern: "policy/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["policy-name"][0]), }, - "rules": &framework.FieldSchema{ + "rules": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["policy-rules"][0]), Deprecated: true, }, - "policy": &framework.FieldSchema{ + "policy": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["policy-rules"][0]), }, @@ -1492,11 +1490,11 @@ func (b *SystemBackend) policyPaths() []*framework.Path { Pattern: "policies/acl/(?P.+)", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["policy-name"][0]), }, - "policy": &framework.FieldSchema{ + "policy": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["policy-rules"][0]), }, @@ -1525,7 +1523,7 @@ func (b *SystemBackend) policyPaths() []*framework.Path { Pattern: "policies/password/(?P.+)/generate$", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The name of the password policy.", }, @@ -1546,11 +1544,11 @@ func (b *SystemBackend) policyPaths() []*framework.Path { Pattern: "policies/password/(?P.+)$", Fields: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The name of the password policy.", }, - "policy": &framework.FieldSchema{ + "policy": { Type: framework.TypeString, Description: "The password policy", }, @@ -1595,7 +1593,7 @@ func (b *SystemBackend) wrappingPaths() []*framework.Path { Pattern: "wrapping/unwrap$", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, }, }, @@ -1612,7 +1610,7 @@ func (b *SystemBackend) wrappingPaths() []*framework.Path { Pattern: "wrapping/lookup$", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, }, }, @@ -1636,7 +1634,7 @@ func (b *SystemBackend) wrappingPaths() []*framework.Path { Pattern: "wrapping/rewrap$", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, }, }, @@ -1657,47 +1655,47 @@ func (b *SystemBackend) mountPaths() []*framework.Path { Pattern: "mounts/(?P.+?)/tune$", Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ + 
"path": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["mount_path"][0]), }, - "default_lease_ttl": &framework.FieldSchema{ + "default_lease_ttl": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]), }, - "max_lease_ttl": &framework.FieldSchema{ + "max_lease_ttl": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]), }, - "description": &framework.FieldSchema{ + "description": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["auth_desc"][0]), }, - "audit_non_hmac_request_keys": &framework.FieldSchema{ + "audit_non_hmac_request_keys": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_request_keys"][0]), }, - "audit_non_hmac_response_keys": &framework.FieldSchema{ + "audit_non_hmac_response_keys": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_response_keys"][0]), }, - "options": &framework.FieldSchema{ + "options": { Type: framework.TypeKVPairs, Description: strings.TrimSpace(sysHelp["tune_mount_options"][0]), }, - "listing_visibility": &framework.FieldSchema{ + "listing_visibility": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["listing_visibility"][0]), }, - "passthrough_request_headers": &framework.FieldSchema{ + "passthrough_request_headers": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["passthrough_request_headers"][0]), }, - "allowed_response_headers": &framework.FieldSchema{ + "allowed_response_headers": { Type: framework.TypeCommaStringSlice, Description: strings.TrimSpace(sysHelp["allowed_response_headers"][0]), }, - "token_type": &framework.FieldSchema{ + "token_type": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["token_type"][0]), }, @@ -1716,42 +1714,42 @@ func (b *SystemBackend) mountPaths() []*framework.Path { Pattern: "mounts/(?P.+?)", Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ + "path": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["mount_path"][0]), }, - "type": &framework.FieldSchema{ + "type": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["mount_type"][0]), }, - "description": &framework.FieldSchema{ + "description": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["mount_desc"][0]), }, - "config": &framework.FieldSchema{ + "config": { Type: framework.TypeMap, Description: strings.TrimSpace(sysHelp["mount_config"][0]), }, - "local": &framework.FieldSchema{ + "local": { Type: framework.TypeBool, Default: false, Description: strings.TrimSpace(sysHelp["mount_local"][0]), }, - "seal_wrap": &framework.FieldSchema{ + "seal_wrap": { Type: framework.TypeBool, Default: false, Description: strings.TrimSpace(sysHelp["seal_wrap"][0]), }, - "external_entropy_access": &framework.FieldSchema{ + "external_entropy_access": { Type: framework.TypeBool, Default: false, Description: strings.TrimSpace(sysHelp["external_entropy_access"][0]), }, - "plugin_name": &framework.FieldSchema{ + "plugin_name": { Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["mount_plugin_name"][0]), }, - "options": &framework.FieldSchema{ + "options": { Type: framework.TypeKVPairs, Description: strings.TrimSpace(sysHelp["mount_options"][0]), }, diff --git a/vault/logical_system_raft.go b/vault/logical_system_raft.go index 08cd536e43..8db5f2d331 100644 --- a/vault/logical_system_raft.go +++ 
b/vault/logical_system_raft.go @@ -638,7 +638,6 @@ func (b *SystemBackend) handleStorageRaftSnapshotWrite(force bool) framework.Ope } return nil - }() return nil, nil diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go index 7296d05bf0..8ae717cfd2 100644 --- a/vault/logical_system_test.go +++ b/vault/logical_system_test.go @@ -138,7 +138,6 @@ func TestSystemConfigCORS(t *testing.T) { if !reflect.DeepEqual(actual, expected) { t.Fatalf("DELETE FAILED -- bad: %#v", actual) } - } func TestSystemBackend_mounts(t *testing.T) { @@ -337,7 +336,6 @@ func TestSystemBackend_mount(t *testing.T) { if diff := deep.Equal(resp.Data, exp); len(diff) > 0 { t.Fatalf("bad: diff: %#v", diff) } - } func TestSystemBackend_mount_force_no_cache(t *testing.T) { @@ -2393,7 +2391,6 @@ func TestSystemBackend_ToolsRandom(t *testing.T) { req.Data["format"] = "hex" req.Data["bytes"] = maxBytes + 1 doRequest(req, true, "", 0) - } func TestSystemBackend_InternalUIMounts(t *testing.T) { @@ -3360,11 +3357,11 @@ func passwordPoliciesFieldData(raw map[string]interface{}) *framework.FieldData return &framework.FieldData{ Raw: raw, Schema: map[string]*framework.FieldSchema{ - "name": &framework.FieldSchema{ + "name": { Type: framework.TypeString, Description: "The name of the password policy.", }, - "policy": &framework.FieldSchema{ + "policy": { Type: framework.TypeString, Description: "The password policy", }, diff --git a/vault/mount.go b/vault/mount.go index 0a1d5cc5ca..b5f250ec7d 100644 --- a/vault/mount.go +++ b/vault/mount.go @@ -142,30 +142,38 @@ func (c *Core) tableMetrics(entryCount int, isLocal bool, isAuth bool, compresse return } typeAuthLabelMap := map[bool]metrics.Label{ - true: metrics.Label{Name: "type", Value: "auth"}, - false: metrics.Label{Name: "type", Value: "logical"}, + true: {Name: "type", Value: "auth"}, + false: {Name: "type", Value: "logical"}, } typeLocalLabelMap := map[bool]metrics.Label{ - true: metrics.Label{Name: "local", Value: "true"}, - false: metrics.Label{Name: "local", Value: "false"}, + true: {Name: "local", Value: "true"}, + false: {Name: "local", Value: "false"}, } c.metricSink.SetGaugeWithLabels(metricsutil.LogicalTableSizeName, - float32(entryCount), []metrics.Label{typeAuthLabelMap[isAuth], - typeLocalLabelMap[isLocal]}) + float32(entryCount), []metrics.Label{ + typeAuthLabelMap[isAuth], + typeLocalLabelMap[isLocal], + }) c.metricsHelper.AddGaugeLoopMetric(metricsutil.LogicalTableSizeName, - float32(entryCount), []metrics.Label{typeAuthLabelMap[isAuth], - typeLocalLabelMap[isLocal]}) + float32(entryCount), []metrics.Label{ + typeAuthLabelMap[isAuth], + typeLocalLabelMap[isLocal], + }) c.metricSink.SetGaugeWithLabels(metricsutil.PhysicalTableSizeName, - float32(len(compressedTable)), []metrics.Label{typeAuthLabelMap[isAuth], - typeLocalLabelMap[isLocal]}) + float32(len(compressedTable)), []metrics.Label{ + typeAuthLabelMap[isAuth], + typeLocalLabelMap[isLocal], + }) c.metricsHelper.AddGaugeLoopMetric(metricsutil.PhysicalTableSizeName, - float32(len(compressedTable)), []metrics.Label{typeAuthLabelMap[isAuth], - typeLocalLabelMap[isLocal]}) + float32(len(compressedTable)), []metrics.Label{ + typeAuthLabelMap[isAuth], + typeLocalLabelMap[isLocal], + }) } // shallowClone returns a copy of the mount table that @@ -258,20 +266,20 @@ const mountStateUnmounting = "unmounting" // MountEntry is used to represent a mount table entry type MountEntry struct { - Table string `json:"table"` // The table it belongs to - Path string `json:"path"` // Mount Path - Type string 
`json:"type"` // Logical backend Type - Description string `json:"description"` // User-provided description - UUID string `json:"uuid"` // Barrier view UUID - BackendAwareUUID string `json:"backend_aware_uuid"` // UUID that can be used by the backend as a helper when a consistent value is needed outside of storage. - Accessor string `json:"accessor"` // Unique but more human-friendly ID. Does not change, not used for any sensitive things (like as a salt, which the UUID sometimes is). - Config MountConfig `json:"config"` // Configuration related to this mount (but not backend-derived) - Options map[string]string `json:"options"` // Backend options - Local bool `json:"local"` // Local mounts are not replicated or affected by replication - SealWrap bool `json:"seal_wrap"` // Whether to wrap CSPs + Table string `json:"table"` // The table it belongs to + Path string `json:"path"` // Mount Path + Type string `json:"type"` // Logical backend Type + Description string `json:"description"` // User-provided description + UUID string `json:"uuid"` // Barrier view UUID + BackendAwareUUID string `json:"backend_aware_uuid"` // UUID that can be used by the backend as a helper when a consistent value is needed outside of storage. + Accessor string `json:"accessor"` // Unique but more human-friendly ID. Does not change, not used for any sensitive things (like as a salt, which the UUID sometimes is). + Config MountConfig `json:"config"` // Configuration related to this mount (but not backend-derived) + Options map[string]string `json:"options"` // Backend options + Local bool `json:"local"` // Local mounts are not replicated or affected by replication + SealWrap bool `json:"seal_wrap"` // Whether to wrap CSPs ExternalEntropyAccess bool `json:"external_entropy_access,omitempty"` // Whether to allow external entropy source access - Tainted bool `json:"tainted,omitempty"` // Set as a Write-Ahead flag for unmount/remount - MountState string `json:"mount_state,omitempty"` // The current mount state. The only non-empty mount state right now is "unmounting" + Tainted bool `json:"tainted,omitempty"` // Set as a Write-Ahead flag for unmount/remount + MountState string `json:"mount_state,omitempty"` // The current mount state. 
The only non-empty mount state right now is "unmounting" NamespaceID string `json:"namespace_id"` // namespace contains the populated namespace @@ -901,7 +909,7 @@ func (c *Core) remount(ctx context.Context, src, dst string, updateStorage bool) if match := c.router.MatchingMount(ctx, dst); match != "" { c.mountsLock.Unlock() return fmt.Errorf("existing mount at %q", match) - } + } var entry *MountEntry for _, mountEntry := range c.mounts.Entries { if mountEntry.Path == src && mountEntry.NamespaceID == ns.ID { diff --git a/vault/mount_test.go b/vault/mount_test.go index f350695298..cd1f8f9424 100644 --- a/vault/mount_test.go +++ b/vault/mount_test.go @@ -197,7 +197,7 @@ func TestCore_Mount_Local(t *testing.T) { c.mounts = &MountTable{ Type: mountTableType, Entries: []*MountEntry{ - &MountEntry{ + { Table: mountTableType, Path: "noop/", Type: "kv", @@ -207,7 +207,7 @@ func TestCore_Mount_Local(t *testing.T) { NamespaceID: namespace.RootNamespaceID, namespace: namespace.RootNamespace, }, - &MountEntry{ + { Table: mountTableType, Path: "noop2/", Type: "kv", @@ -444,6 +444,7 @@ func testCore_Unmount_Cleanup(t *testing.T, causeFailure bool) { } } } + func TestCore_RemountConcurrent(t *testing.T) { c2, _, _ := TestCoreUnsealed(t) noop := &NoopBackend{} @@ -518,6 +519,7 @@ func TestCore_RemountConcurrent(t *testing.T) { c2MountMap[v.Path] = v } } + func TestCore_Remount(t *testing.T) { c, keys, _ := TestCoreUnsealed(t) err := c.remount(namespace.RootContext(nil), "secret", "foo", true) @@ -903,7 +905,8 @@ func TestCore_MountInitialize(t *testing.T) { backend := &InitializableBackend{ &NoopBackend{ BackendType: logical.TypeLogical, - }, false} + }, false, + } c, _, _ := TestCoreUnsealed(t) c.logicalBackends["initable"] = func(context.Context, *logical.BackendConfig) (logical.Backend, error) { @@ -928,7 +931,8 @@ func TestCore_MountInitialize(t *testing.T) { backend := &InitializableBackend{ &NoopBackend{ BackendType: logical.TypeLogical, - }, false} + }, false, + } c, _, _ := TestCoreUnsealed(t) c.logicalBackends["initable"] = func(context.Context, *logical.BackendConfig) (logical.Backend, error) { @@ -938,7 +942,7 @@ func TestCore_MountInitialize(t *testing.T) { c.mounts = &MountTable{ Type: mountTableType, Entries: []*MountEntry{ - &MountEntry{ + { Table: mountTableType, Path: "foo/", Type: "initable", diff --git a/vault/namespaces.go b/vault/namespaces.go index 335c90896b..834fa465df 100644 --- a/vault/namespaces.go +++ b/vault/namespaces.go @@ -6,9 +6,7 @@ import ( "github.com/hashicorp/vault/helper/namespace" ) -var ( - NamespaceByID func(context.Context, string, *Core) (*namespace.Namespace, error) = namespaceByID -) +var NamespaceByID func(context.Context, string, *Core) (*namespace.Namespace, error) = namespaceByID const ( mountTypeNSCubbyhole = "ns_cubbyhole" diff --git a/vault/plugin_catalog.go b/vault/plugin_catalog.go index 47c6fdbf32..d52c1ac379 100644 --- a/vault/plugin_catalog.go +++ b/vault/plugin_catalog.go @@ -374,7 +374,6 @@ func (c *PluginCatalog) List(ctx context.Context, pluginType consts.PluginType) pluginTypePrefix := pluginType.String() + "/" for _, plugin := range keys { - // Only list user-added plugins if they're of the given type. 
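// Illustration of the most common rewrite in this patch: composite literals
// drop element types that the compiler already infers from the enclosing
// slice or map type. A minimal sketch with a hypothetical Entry type:
//
//	// before
//	entries := []*Entry{
//		&Entry{Name: "a"},
//	}
//	params := map[string][]interface{}{"zip": []interface{}{}}
//
//	// after
//	entries := []*Entry{
//		{Name: "a"},
//	}
//	params := map[string][]interface{}{"zip": {}}
//
// Both spellings compile to identical code; the shorter elided form is what
// the hunks above and below apply to framework.FieldSchema, framework.Path,
// MountEntry, and the test fixtures.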
if entry, err := c.get(ctx, plugin, pluginType); err == nil && entry != nil { diff --git a/vault/plugin_catalog_test.go b/vault/plugin_catalog_test.go index 3af87854b5..2fb138c2f0 100644 --- a/vault/plugin_catalog_test.go +++ b/vault/plugin_catalog_test.go @@ -3,8 +3,6 @@ package vault import ( "context" "fmt" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/pluginutil" "io/ioutil" "os" "path/filepath" @@ -12,6 +10,9 @@ import ( "sort" "testing" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/helper/builtinplugins" ) @@ -106,7 +107,6 @@ func TestPluginCatalog_CRUD(t *testing.T) { if !reflect.DeepEqual(p, expectedBuiltin) { t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expectedBuiltin) } - } func TestPluginCatalog_List(t *testing.T) { @@ -179,5 +179,4 @@ func TestPluginCatalog_List(t *testing.T) { t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[i+1], p) } } - } diff --git a/vault/policy.go b/vault/policy.go index a6b5b5b397..6f6ec8b558 100644 --- a/vault/policy.go +++ b/vault/policy.go @@ -69,17 +69,15 @@ func (p PolicyType) String() string { return "" } -var ( - cap2Int = map[string]uint32{ - DenyCapability: DenyCapabilityInt, - CreateCapability: CreateCapabilityInt, - ReadCapability: ReadCapabilityInt, - UpdateCapability: UpdateCapabilityInt, - DeleteCapability: DeleteCapabilityInt, - ListCapability: ListCapabilityInt, - SudoCapability: SudoCapabilityInt, - } -) +var cap2Int = map[string]uint32{ + DenyCapability: DenyCapabilityInt, + CreateCapability: CreateCapabilityInt, + ReadCapability: ReadCapabilityInt, + UpdateCapability: UpdateCapabilityInt, + DeleteCapability: DeleteCapabilityInt, + ListCapability: ListCapabilityInt, + SudoCapability: SudoCapabilityInt, +} type egpPath struct { Path string `json:"path"` diff --git a/vault/policy_store_test.go b/vault/policy_store_test.go index 1342d83913..d4b1fcf6e8 100644 --- a/vault/policy_store_test.go +++ b/vault/policy_store_test.go @@ -268,7 +268,7 @@ func testPolicyStoreACL(t *testing.T, ps *PolicyStore, ns *namespace.Namespace) } ctx = namespace.ContextWithNamespace(context.Background(), ns) - acl, err := ps.ACL(ctx, nil, map[string][]string{ns.ID: []string{"dev", "ops"}}) + acl, err := ps.ACL(ctx, nil, map[string][]string{ns.ID: {"dev", "ops"}}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/vault/policy_test.go b/vault/policy_test.go index 33d9f16676..d464dcc6cc 100644 --- a/vault/policy_test.go +++ b/vault/policy_test.go @@ -204,10 +204,10 @@ func TestPolicy_Parse(t *testing.T) { "create", "sudo", }, - DeniedParametersHCL: map[string][]interface{}{"zip": []interface{}{}, "zap": []interface{}{}}, + DeniedParametersHCL: map[string][]interface{}{"zip": {}, "zap": {}}, Permissions: &ACLPermissions{ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt), - DeniedParameters: map[string][]interface{}{"zip": []interface{}{}, "zap": []interface{}{}}, + DeniedParameters: map[string][]interface{}{"zip": {}, "zap": {}}, }, }, { @@ -231,12 +231,12 @@ func TestPolicy_Parse(t *testing.T) { "create", "sudo", }, - AllowedParametersHCL: map[string][]interface{}{"map": []interface{}{map[string]interface{}{"good": "one"}}, "int": []interface{}{1, 2}}, - DeniedParametersHCL: map[string][]interface{}{"string": []interface{}{"test"}, "bool": []interface{}{false}}, + AllowedParametersHCL: map[string][]interface{}{"map": {map[string]interface{}{"good": "one"}}, "int": 
{1, 2}}, + DeniedParametersHCL: map[string][]interface{}{"string": {"test"}, "bool": {false}}, Permissions: &ACLPermissions{ CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt), - AllowedParameters: map[string][]interface{}{"map": []interface{}{map[string]interface{}{"good": "one"}}, "int": []interface{}{1, 2}}, - DeniedParameters: map[string][]interface{}{"string": []interface{}{"test"}, "bool": []interface{}{false}}, + AllowedParameters: map[string][]interface{}{"map": {map[string]interface{}{"good": "one"}}, "int": {1, 2}}, + DeniedParameters: map[string][]interface{}{"string": {"test"}, "bool": {false}}, }, IsPrefix: false, }, diff --git a/vault/quotas/quotas_util.go b/vault/quotas/quotas_util.go index 983417476c..eead3381e2 100644 --- a/vault/quotas/quotas_util.go +++ b/vault/quotas/quotas_util.go @@ -37,8 +37,7 @@ func (*entManager) Reset() error { return nil } -type LeaseCountQuota struct { -} +type LeaseCountQuota struct{} func (l LeaseCountQuota) allow(request *Request) (Response, error) { panic("implement me") diff --git a/vault/rekey.go b/vault/rekey.go index 66d3106d15..38912324bd 100644 --- a/vault/rekey.go +++ b/vault/rekey.go @@ -457,7 +457,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) // If PGP keys are passed in, encrypt shares with corresponding PGP keys. if len(c.barrierRekeyConfig.PGPKeys) > 0 { hexEncodedShares := make([][]byte, len(results.SecretShares)) - for i, _ := range results.SecretShares { + for i := range results.SecretShares { hexEncodedShares[i] = []byte(hex.EncodeToString(results.SecretShares[i])) } results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.barrierRekeyConfig.PGPKeys) @@ -701,7 +701,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string if len(c.recoveryRekeyConfig.PGPKeys) > 0 { hexEncodedShares := make([][]byte, len(results.SecretShares)) - for i, _ := range results.SecretShares { + for i := range results.SecretShares { hexEncodedShares[i] = []byte(hex.EncodeToString(results.SecretShares[i])) } results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.recoveryRekeyConfig.PGPKeys) diff --git a/vault/request_handling.go b/vault/request_handling.go index 13c33ad7d7..fe0fe41177 100644 --- a/vault/request_handling.go +++ b/vault/request_handling.go @@ -61,7 +61,7 @@ func (c *Core) fetchEntityAndDerivedPolicies(ctx context.Context, tokenNS *names return nil, nil, nil } - //c.logger.Debug("entity set on the token", "entity_id", te.EntityID) + // c.logger.Debug("entity set on the token", "entity_id", te.EntityID) // Fetch the entity entity, err := c.identityStore.MemDBEntityByID(entityID, false) @@ -83,7 +83,7 @@ func (c *Core) fetchEntityAndDerivedPolicies(ctx context.Context, tokenNS *names policies := make(map[string][]string) if entity != nil { - //c.logger.Debug("entity successfully fetched; adding entity policies to token's policies to create ACL") + // c.logger.Debug("entity successfully fetched; adding entity policies to token's policies to create ACL") // Attach the policies on the entity if len(entity.Policies) != 0 { @@ -457,7 +457,7 @@ func (c *Core) handleCancelableRequest(ctx context.Context, ns *namespace.Namesp return nil, logical.CodedError(403, "namespaces feature not enabled") } - var walState = &logical.WALState{} + walState := &logical.WALState{} ctx = logical.IndexStateContext(ctx, walState) var auth *logical.Auth if c.router.LoginPath(ctx, req.Path) { diff --git 
a/vault/request_handling_test.go b/vault/request_handling_test.go index dd8b2b76db..c4e22a565d 100644 --- a/vault/request_handling_test.go +++ b/vault/request_handling_test.go @@ -344,7 +344,6 @@ func checkCounter(t *testing.T, inmemSink *metrics.InmemSink, keyPrefix string, if counter.Sum != 1.0 { t.Errorf("Counter sum %v is not 1.", counter.Sum) } - } func TestRequestHandling_LoginMetric(t *testing.T) { @@ -429,7 +428,6 @@ func TestRequestHandling_LoginMetric(t *testing.T) { "token_type": "service", }, ) - } func TestRequestHandling_SecretLeaseMetric(t *testing.T) { diff --git a/vault/rollback.go b/vault/rollback.go index cd85f0b0b2..d7dd71e1d2 100644 --- a/vault/rollback.go +++ b/vault/rollback.go @@ -117,7 +117,6 @@ func (m *RollbackManager) run() { // triggerRollbacks is used to trigger the rollbacks across all the backends func (m *RollbackManager) triggerRollbacks() { - backends := m.backends() for _, e := range backends { diff --git a/vault/rollback_test.go b/vault/rollback_test.go index d34837a48e..4308d70e1c 100644 --- a/vault/rollback_test.go +++ b/vault/rollback_test.go @@ -24,7 +24,7 @@ func mockRollback(t *testing.T) (*RollbackManager, *NoopBackend) { view := NewBarrierView(barrier, "logical/") mounts.Entries = []*MountEntry{ - &MountEntry{ + { Path: "foo", NamespaceID: namespace.RootNamespaceID, namespace: namespace.RootNamespace, diff --git a/vault/router.go b/vault/router.go index 9624b09b59..be067f78a2 100644 --- a/vault/router.go +++ b/vault/router.go @@ -18,11 +18,9 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -var ( - deniedPassthroughRequestHeaders = []string{ - consts.AuthHeaderName, - } -) +var deniedPassthroughRequestHeaders = []string{ + consts.AuthHeaderName, +} // Router is used to do prefix based routing of a request to a logical backend type Router struct { @@ -346,9 +344,11 @@ func (r *Router) MountConflict(ctx context.Context, path string) string { func (r *Router) MatchingStorageByAPIPath(ctx context.Context, path string) logical.Storage { return r.matchingStorage(ctx, path, true) } + func (r *Router) MatchingStorageByStoragePath(ctx context.Context, path string) logical.Storage { return r.matchingStorage(ctx, path, false) } + func (r *Router) matchingStorage(ctx context.Context, path string, apiPath bool) logical.Storage { ns, err := namespace.FromContext(ctx) if err != nil { @@ -512,8 +512,10 @@ func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenc return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath } req.Path = adjustedPath - defer metrics.MeasureSince([]string{"route", string(req.Operation), - strings.Replace(mount, "/", "-", -1)}, time.Now()) + defer metrics.MeasureSince([]string{ + "route", string(req.Operation), + strings.Replace(mount, "/", "-", -1), + }, time.Now()) re := raw.(*routeEntry) // Grab a read lock on the route entry, this protects against the backend diff --git a/vault/seal.go b/vault/seal.go index f6e76e5741..3bd0cc80b4 100644 --- a/vault/seal.go +++ b/vault/seal.go @@ -7,9 +7,10 @@ import ( "encoding/json" "errors" "fmt" + "sync/atomic" + "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/physical" - "sync/atomic" "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" diff --git a/vault/seal_autoseal.go b/vault/seal_autoseal.go index cc13a12a13..5f38582b0d 100644 --- a/vault/seal_autoseal.go +++ b/vault/seal_autoseal.go @@ -456,7 +456,6 @@ func (d *autoSeal) upgradeRecoveryKey(ctx 
context.Context) error { pt, err := d.Decrypt(ctx, blobInfo, nil) if err != nil { return errwrap.Wrapf("failed to decrypt encrypted recovery key: {{err}}", err) - } if err := d.SetRecoveryKey(ctx, pt); err != nil { return errwrap.Wrapf("failed to save upgraded recovery key: {{err}}", err) diff --git a/vault/sealunwrapper.go b/vault/sealunwrapper.go index f3efa25fee..1d124ea530 100644 --- a/vault/sealunwrapper.go +++ b/vault/sealunwrapper.go @@ -33,8 +33,10 @@ func NewSealUnwrapper(underlying physical.Backend, logger log.Logger) physical.B return ret } -var _ physical.Backend = (*sealUnwrapper)(nil) -var _ physical.Transactional = (*transactionalSealUnwrapper)(nil) +var ( + _ physical.Backend = (*sealUnwrapper)(nil) + _ physical.Transactional = (*transactionalSealUnwrapper)(nil) +) type sealUnwrapper struct { underlying physical.Backend diff --git a/vault/testing.go b/vault/testing.go index d69fec7c35..cb28f4ec2c 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -389,7 +389,6 @@ func testCoreAddSecretMount(t testing.T, core *Core, token string) { if resp.IsError() { t.Fatal(err) } - } func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) { @@ -520,8 +519,10 @@ func TestAddTestPlugin(t testing.T, c *Core, name string, pluginType consts.Plug } } -var testLogicalBackends = map[string]logical.Factory{} -var testCredentialBackends = map[string]logical.Factory{} +var ( + testLogicalBackends = map[string]logical.Factory{} + testCredentialBackends = map[string]logical.Factory{} +) // This adds a credential backend for the test core. This needs to be // invoked before the test core is created. @@ -905,7 +906,6 @@ func (c *TestClusterCore) Seal(t testing.T) { } func (c *TestClusterCore) stop() error { - c.Logger().Info("stopping vault test core") if c.Listeners != nil { @@ -1121,12 +1121,12 @@ func NewTestLogger(t testing.T) *TestLogger { var logPath string output := os.Stderr - var logDir = os.Getenv("VAULT_TEST_LOG_DIR") + logDir := os.Getenv("VAULT_TEST_LOG_DIR") if logDir != "" { logPath = filepath.Join(logDir, t.Name()+".log") // t.Name may include slashes. 
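// Illustration of the octal-literal rewrite used in the file-permission calls
// below: gofumpt prefers the explicit 0o prefix, and the numeric value is
// unchanged. A minimal sketch with a hypothetical directory:
//
//	// before
//	if err := os.MkdirAll("/tmp/vault-test-logs", 0755); err != nil {
//		t.Fatal(err)
//	}
//
//	// after
//	if err := os.MkdirAll("/tmp/vault-test-logs", 0o755); err != nil {
//		t.Fatal(err)
//	}
//
// 0755 and 0o755 are the same integer; only the spelling of the base changes,
// so the file modes written by the test helpers are unaffected.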
dir, _ := filepath.Split(logPath) - err := os.MkdirAll(dir, 0755) + err := os.MkdirAll(dir, 0o755) if err != nil { t.Fatal(err) } @@ -1212,7 +1212,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te if opts != nil && opts.TempDir != "" { if _, err := os.Stat(opts.TempDir); os.IsNotExist(err) { - if err := os.MkdirAll(opts.TempDir, 0700); err != nil { + if err := os.MkdirAll(opts.TempDir, 0o700); err != nil { t.Fatal(err) } } @@ -1271,7 +1271,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te } testCluster.CACertPEM = pem.EncodeToMemory(caCertPEMBlock) testCluster.CACertPEMFile = filepath.Join(testCluster.TempDir, "ca_cert.pem") - err = ioutil.WriteFile(testCluster.CACertPEMFile, testCluster.CACertPEM, 0755) + err = ioutil.WriteFile(testCluster.CACertPEMFile, testCluster.CACertPEM, 0o755) if err != nil { t.Fatal(err) } @@ -1284,7 +1284,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te Bytes: marshaledCAKey, } testCluster.CAKeyPEM = pem.EncodeToMemory(caKeyPEMBlock) - err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0755) + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0o755) if err != nil { t.Fatal(err) } @@ -1375,11 +1375,11 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te certFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_cert.pem", i+1, ln.Addr().(*net.TCPAddr).Port)) keyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_key.pem", i+1, ln.Addr().(*net.TCPAddr).Port)) - err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0755) + err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0o755) if err != nil { t.Fatal(err) } - err = ioutil.WriteFile(keyFile, certInfoSlice[i].keyPEM, 0755) + err = ioutil.WriteFile(keyFile, certInfoSlice[i].keyPEM, 0o755) if err != nil { t.Fatal(err) } @@ -1404,10 +1404,11 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te } tlsConfig.BuildNameToCertificate() tlsConfigs = append(tlsConfigs, tlsConfig) - lns := []*TestListener{&TestListener{ - Listener: tls.NewListener(ln, tlsConfig), - Address: ln.Addr().(*net.TCPAddr), - }, + lns := []*TestListener{ + { + Listener: tls.NewListener(ln, tlsConfig), + Address: ln.Addr().(*net.TCPAddr), + }, } listeners = append(listeners, lns) var handler http.Handler = http.NewServeMux() @@ -1710,10 +1711,11 @@ func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOpt if err != nil { t.Fatal(err) } - tcc.Listeners = []*TestListener{&TestListener{ - Listener: tls.NewListener(ln, tcc.TLSConfig), - Address: ln.Addr().(*net.TCPAddr), - }, + tcc.Listeners = []*TestListener{ + { + Listener: tls.NewListener(ln, tcc.TLSConfig), + Address: ln.Addr().(*net.TCPAddr), + }, } tcc.Handler = http.NewServeMux() @@ -1901,7 +1903,6 @@ func (testCluster *TestCluster) setupClusterListener( } func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions, addAuditBackend bool) { - leader := tc.Cores[0] bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, leader.Core, leader.Handler) @@ -1912,7 +1913,7 @@ func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions, addAudit tc.RootToken = root // Write root token and barrier keys - err := ioutil.WriteFile(filepath.Join(tc.TempDir, "root_token"), []byte(root), 0755) + err := ioutil.WriteFile(filepath.Join(tc.TempDir, "root_token"), 
[]byte(root), 0o755) if err != nil { t.Fatal(err) } @@ -1923,7 +1924,7 @@ func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions, addAudit buf.WriteRune('\n') } } - err = ioutil.WriteFile(filepath.Join(tc.TempDir, "barrier_keys"), buf.Bytes(), 0755) + err = ioutil.WriteFile(filepath.Join(tc.TempDir, "barrier_keys"), buf.Bytes(), 0o755) if err != nil { t.Fatal(err) } @@ -1933,7 +1934,7 @@ func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions, addAudit buf.WriteRune('\n') } } - err = ioutil.WriteFile(filepath.Join(tc.TempDir, "recovery_keys"), buf.Bytes(), 0755) + err = ioutil.WriteFile(filepath.Join(tc.TempDir, "recovery_keys"), buf.Bytes(), 0o755) if err != nil { t.Fatal(err) } @@ -2050,7 +2051,6 @@ func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions, addAudit t.Fatal(err) } } - } func (testCluster *TestCluster) getAPIClient( diff --git a/vault/token_store.go b/vault/token_store.go index 3d177e83f3..b1c6725c4e 100644 --- a/vault/token_store.go +++ b/vault/token_store.go @@ -142,7 +142,7 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "create/" + framework.GenericNameRegex("role_name"), Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role", }, @@ -171,7 +171,7 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "lookup", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token to lookup (POST request body)", }, @@ -190,7 +190,7 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "lookup-accessor", Fields: map[string]*framework.FieldSchema{ - "accessor": &framework.FieldSchema{ + "accessor": { Type: framework.TypeString, Description: "Accessor of the token to look up (request body)", }, @@ -208,7 +208,7 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "lookup-self$", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token to look up (unused, does not need to be set)", }, @@ -227,7 +227,7 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "revoke-accessor", Fields: map[string]*framework.FieldSchema{ - "accessor": &framework.FieldSchema{ + "accessor": { Type: framework.TypeString, Description: "Accessor of the token (request body)", }, @@ -256,7 +256,7 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "revoke", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token to revoke (request body)", }, @@ -274,7 +274,7 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "revoke-orphan", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token to revoke (request body)", }, @@ -292,11 +292,11 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "renew-accessor", Fields: map[string]*framework.FieldSchema{ - "accessor": &framework.FieldSchema{ + "accessor": { Type: framework.TypeString, Description: "Accessor of the token to renew (request body)", }, - "increment": &framework.FieldSchema{ + "increment": { Type: framework.TypeDurationSecond, Default: 0, Description: "The desired increment in seconds to the token expiration", @@ -315,11 +315,11 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "renew-self$", Fields: 
map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token to renew (unused, does not need to be set)", }, - "increment": &framework.FieldSchema{ + "increment": { Type: framework.TypeDurationSecond, Default: 0, Description: "The desired increment in seconds to the token expiration", @@ -338,11 +338,11 @@ func (ts *TokenStore) paths() []*framework.Path { Pattern: "renew", Fields: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token to renew (request body)", }, - "increment": &framework.FieldSchema{ + "increment": { Type: framework.TypeDurationSecond, Default: 0, Description: "The desired increment in seconds to the token expiration", @@ -372,56 +372,56 @@ func (ts *TokenStore) paths() []*framework.Path { rolesPath := &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("role_name"), Fields: map[string]*framework.FieldSchema{ - "role_name": &framework.FieldSchema{ + "role_name": { Type: framework.TypeString, Description: "Name of the role", }, - "allowed_policies": &framework.FieldSchema{ + "allowed_policies": { Type: framework.TypeCommaStringSlice, Description: tokenAllowedPoliciesHelp, }, - "disallowed_policies": &framework.FieldSchema{ + "disallowed_policies": { Type: framework.TypeCommaStringSlice, Description: tokenDisallowedPoliciesHelp, }, - "orphan": &framework.FieldSchema{ + "orphan": { Type: framework.TypeBool, Description: tokenOrphanHelp, }, - "period": &framework.FieldSchema{ + "period": { Type: framework.TypeDurationSecond, Description: "Use 'token_period' instead.", Deprecated: true, }, - "path_suffix": &framework.FieldSchema{ + "path_suffix": { Type: framework.TypeString, Description: tokenPathSuffixHelp + pathSuffixSanitize.String(), }, - "explicit_max_ttl": &framework.FieldSchema{ + "explicit_max_ttl": { Type: framework.TypeDurationSecond, Description: "Use 'token_explicit_max_ttl' instead.", Deprecated: true, }, - "renewable": &framework.FieldSchema{ + "renewable": { Type: framework.TypeBool, Default: true, Description: tokenRenewableHelp, }, - "bound_cidrs": &framework.FieldSchema{ + "bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: "Use 'token_bound_cidrs' instead.", Deprecated: true, }, - "allowed_entity_aliases": &framework.FieldSchema{ + "allowed_entity_aliases": { Type: framework.TypeCommaStringSlice, Description: "String or JSON list of allowed entity aliases. If set, specifies the entity aliases which are allowed to be used during token generation. 
This field supports globbing.", }, @@ -568,7 +568,6 @@ func NewTokenStore(ctx context.Context, logger log.Logger, core *Core, config *l } func (ts *TokenStore) Invalidate(ctx context.Context, key string) { - switch key { case tokenSubPath + salt.DefaultLocation: ts.saltLock.Lock() @@ -1738,7 +1737,6 @@ func (ts *TokenStore) lookupByAccessor(ctx context.Context, id string, salted, t } entry, err := ts.accessorView(ns).Get(ctx, lookupID) - if err != nil { return aEntry, errwrap.Wrapf("failed to read index using accessor: {{err}}", err) } @@ -1794,7 +1792,6 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data var tidyErrors *multierror.Error doTidy := func() error { - ts.logger.Info("beginning tidy operation on tokens") defer ts.logger.Info("finished tidy operation on tokens") @@ -2061,7 +2058,7 @@ func (ts *TokenStore) handleUpdateLookupAccessor(ctx context.Context, req *logic "token": aEntry.TokenID, }, Schema: map[string]*framework.FieldSchema{ - "token": &framework.FieldSchema{ + "token": { Type: framework.TypeString, Description: "Token to lookup", }, @@ -2076,7 +2073,6 @@ func (ts *TokenStore) handleUpdateLookupAccessor(ctx context.Context, req *logic } if resp.IsError() { return resp, nil - } // Remove the token ID from the response @@ -3480,7 +3476,6 @@ func (ts *TokenStore) gaugeCollector(ctx context.Context) ([]metricsutil.GaugeLa return true } }) - if err != nil { return []metricsutil.GaugeLabelValues{}, suppressRestoreModeError(err) } @@ -3497,7 +3492,6 @@ func (ts *TokenStore) gaugeCollector(ctx context.Context) ([]metricsutil.GaugeLa values[i].Value = float32(intValues[i]) } return values, nil - } func (ts *TokenStore) gaugeCollectorByPolicy(ctx context.Context) ([]metricsutil.GaugeLabelValues, error) { @@ -3529,7 +3523,6 @@ func (ts *TokenStore) gaugeCollectorByPolicy(ctx context.Context) ([]metricsutil return true } }) - if err != nil { return []metricsutil.GaugeLabelValues{}, suppressRestoreModeError(err) } @@ -3596,7 +3589,6 @@ func (ts *TokenStore) gaugeCollectorByTtl(ctx context.Context) ([]metricsutil.Ga return true } }) - if err != nil { return []metricsutil.GaugeLabelValues{}, suppressRestoreModeError(err) } @@ -3697,7 +3689,6 @@ func (ts *TokenStore) gaugeCollectorByMethod(ctx context.Context) ([]metricsutil return true } }) - if err != nil { return []metricsutil.GaugeLabelValues{}, suppressRestoreModeError(err) } diff --git a/vault/token_store_test.go b/vault/token_store_test.go index 7643927c03..94f3ab7cfd 100644 --- a/vault/token_store_test.go +++ b/vault/token_store_test.go @@ -4051,7 +4051,7 @@ func TestTokenStore_RoleExplicitMaxTTL(t *testing.T) { func TestTokenStore_RoleTokenFields(t *testing.T) { c, _, _ := TestCoreUnsealed(t) - //c, _, root := TestCoreUnsealed(t) + // c, _, root := TestCoreUnsealed(t) ts := c.tokenStore rootContext := namespace.RootContext(context.Background()) @@ -4466,6 +4466,7 @@ func testTokenStore_NumUses_SelfLookupHelper(t *testing.T, core *Core, clientTok t.Fatalf("num_uses mismatch (expected %d, got %d)", expectedNumUses, actualNumUses) } } + func TestTokenStore_NumUses(t *testing.T) { core, _, root := TestCoreUnsealed(t) roleNumUses := 10 diff --git a/vault/wrapping.go b/vault/wrapping.go index b2558ac454..6e67093d4c 100644 --- a/vault/wrapping.go +++ b/vault/wrapping.go @@ -409,7 +409,7 @@ func (c *Core) ValidateWrappingToken(ctx context.Context, req *logical.Request) return false, errwrap.Wrapf("wrapping token could not be parsed: {{err}}", err) } var claims squarejwt.Claims - var allClaims = 
make(map[string]interface{}) + allClaims := make(map[string]interface{}) if err = parsedJWT.Claims(&c.wrappingJWTKey.PublicKey, &claims, &allClaims); err != nil { return false, errwrap.Wrapf("wrapping token signature could not be validated: {{err}}", err) } diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index d548009768..ce5f7798b2 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -25,26 +25,30 @@ import ( "golang.org/x/time/rate" ) -const EnvVaultAddress = "VAULT_ADDR" -const EnvVaultAgentAddr = "VAULT_AGENT_ADDR" -const EnvVaultCACert = "VAULT_CACERT" -const EnvVaultCAPath = "VAULT_CAPATH" -const EnvVaultClientCert = "VAULT_CLIENT_CERT" -const EnvVaultClientKey = "VAULT_CLIENT_KEY" -const EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" -const EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" -const EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" -const EnvVaultNamespace = "VAULT_NAMESPACE" -const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" -const EnvVaultWrapTTL = "VAULT_WRAP_TTL" -const EnvVaultMaxRetries = "VAULT_MAX_RETRIES" -const EnvVaultToken = "VAULT_TOKEN" -const EnvVaultMFA = "VAULT_MFA" -const EnvRateLimit = "VAULT_RATE_LIMIT" +const ( + EnvVaultAddress = "VAULT_ADDR" + EnvVaultAgentAddr = "VAULT_AGENT_ADDR" + EnvVaultCACert = "VAULT_CACERT" + EnvVaultCAPath = "VAULT_CAPATH" + EnvVaultClientCert = "VAULT_CLIENT_CERT" + EnvVaultClientKey = "VAULT_CLIENT_KEY" + EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" + EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" + EnvVaultNamespace = "VAULT_NAMESPACE" + EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" + EnvVaultWrapTTL = "VAULT_WRAP_TTL" + EnvVaultMaxRetries = "VAULT_MAX_RETRIES" + EnvVaultToken = "VAULT_TOKEN" + EnvVaultMFA = "VAULT_MFA" + EnvRateLimit = "VAULT_RATE_LIMIT" +) // Deprecated values -const EnvVaultAgentAddress = "VAULT_AGENT_ADDR" -const EnvVaultInsecure = "VAULT_SKIP_VERIFY" +const ( + EnvVaultAgentAddress = "VAULT_AGENT_ADDR" + EnvVaultInsecure = "VAULT_SKIP_VERIFY" +) // WrappingLookupFunc is a function that, given an HTTP verb and a path, // returns an optional string duration to be used for response wrapping (e.g. @@ -359,7 +363,6 @@ func (c *Config) ReadEnvironment() error { } func parseRateLimit(val string) (rate float64, burst int, err error) { - _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) if err != nil { rate, err = strconv.ParseFloat(val, 64) @@ -370,7 +373,6 @@ func parseRateLimit(val string) (rate float64, burst int, err error) { } return rate, burst, err - } // Client is the client to the Vault API. Create a client with NewClient. @@ -793,7 +795,7 @@ func (c *Client) NewRequest(method, requestPath string) *Request { policyOverride := c.policyOverride c.modifyLock.RUnlock() - var host = addr.Host + host := addr.Host // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV // record and take the highest match; this is not designed for high-availability, just discovery // Internet Draft specifies that the SRV record is ignored if a port is given @@ -985,8 +987,10 @@ START: return result, nil } -type RequestCallback func(*Request) -type ResponseCallback func(*Response) +type ( + RequestCallback func(*Request) + ResponseCallback func(*Response) +) // WithRequestCallbacks makes a shallow clone of Client, modifies it to use // the given callbacks, and returns it. 
Each of the callbacks will be invoked diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go index 841c51c094..79e4c20433 100644 --- a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -380,5 +380,7 @@ func (r *LifetimeWatcher) calculateGrace(leaseDuration time.Duration) { r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax)) } -type Renewer = LifetimeWatcher -type RenewerInput = LifetimeWatcherInput +type ( + Renewer = LifetimeWatcher + RenewerInput = LifetimeWatcherInput +) diff --git a/vendor/github.com/hashicorp/vault/api/output_string.go b/vendor/github.com/hashicorp/vault/api/output_string.go index b836b77a5a..5979c56567 100644 --- a/vendor/github.com/hashicorp/vault/api/output_string.go +++ b/vendor/github.com/hashicorp/vault/api/output_string.go @@ -11,9 +11,7 @@ const ( ErrOutputStringRequest = "output a string, please" ) -var ( - LastOutputStringError *OutputStringError -) +var LastOutputStringError *OutputStringError type OutputStringError struct { *retryablehttp.Request diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go index 3aa4e6e46a..c2978b388f 100644 --- a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -82,7 +82,7 @@ func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) } - var allClaims = make(map[string]interface{}) + allClaims := make(map[string]interface{}) if err = parsedJWT.UnsafeClaimsWithoutVerification(&allClaims); err != nil { return nil, errwrap.Wrapf("error parsing claims from wrapping token: {{err}}", err) } diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go index 5fa6f3585d..d0c6408366 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_audit.go +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -52,7 +52,6 @@ func (c *Sys) ListAudit() (map[string]*Audit, error) { ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) - if err != nil { return nil, err } @@ -94,7 +93,6 @@ func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) e ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() resp, err := c.c.RawRequestWithContext(ctx, r) - if err != nil { return err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go index e7a9c222d8..46abae4eff 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_auth.go +++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go @@ -74,7 +74,9 @@ func (c *Sys) DisableAuth(path string) error { } // Rather than duplicate, we can use modern Go's type aliasing -type EnableAuthOptions = MountInput -type AuthConfigInput = MountConfigInput -type AuthMount = MountOutput -type AuthConfigOutput = MountConfigOutput +type ( + EnableAuthOptions = MountInput + AuthConfigInput = MountConfigInput + AuthMount = MountOutput + AuthConfigOutput = MountConfigOutput +) diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go index d90bcd0ab3..c17072d958 100644 --- 
a/vendor/github.com/hashicorp/vault/api/sys_plugins.go +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -109,7 +109,6 @@ func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { for i, nameIfc := range pluginsIfc { name, ok := nameIfc.(string) if !ok { - } plugins[i] = name } @@ -323,7 +322,6 @@ func (c *Sys) ReloadPluginStatus(reloadStatusInput *ReloadPluginStatusInput) (*R return &r, nil } return nil, nil - } // catalogPathByType is a helper to construct the proper API path by plugin type diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/client.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/client.go index 907e5352b7..c30c86d0c9 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/client.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/client.go @@ -32,18 +32,17 @@ func (dc *DatabasePluginClient) Close() error { // plugin. The client is wrapped in a DatabasePluginClient object to ensure the // plugin is killed on call of Close(). func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (Database, error) { - // pluginSets is the map of plugins we can dispense. pluginSets := map[int]plugin.PluginSet{ // Version 3 used to supports both protocols. We want to keep it around // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. - 3: plugin.PluginSet{ + 3: { "database": new(GRPCDatabasePlugin), }, // Version 4 only supports gRPC - 4: plugin.PluginSet{ + 4: { "database": new(GRPCDatabasePlugin), }, } diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/grpc_transport.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/grpc_transport.go index bfd848021c..927f524afc 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/grpc_transport.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/grpc_transport.go @@ -65,7 +65,6 @@ func (s *gRPCServer) RevokeUser(ctx context.Context, req *RevokeUserRequest) (*E } func (s *gRPCServer) RotateRootCredentials(ctx context.Context, req *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) { - resp, err := s.impl.RotateRootCredentials(ctx, req.Statements) if err != nil { return nil, err @@ -128,7 +127,6 @@ func (s *gRPCServer) GenerateCredentials(ctx context.Context, _ *Empty) (*Genera } func (s *gRPCServer) SetCredentials(ctx context.Context, req *SetCredentialsRequest) (*SetCredentialsResponse, error) { - username, password, err := s.impl.SetCredentials(ctx, *req.Statements, *req.StaticUserConfig) if err != nil { return nil, err @@ -222,7 +220,6 @@ func (c *gRPCClient) RevokeUser(ctx context.Context, statements Statements, user Statements: &statements, Username: username, }) - if err != nil { if c.doneCtx.Err() != nil { return ErrPluginShutdown @@ -243,7 +240,6 @@ func (c *gRPCClient) RotateRootCredentials(ctx context.Context, statements []str resp, err := c.client.RotateRootCredentials(ctx, &RotateRootCredentialsRequest{ Statements: statements, }) - if err != nil { if c.doneCtx.Err() != nil { return nil, ErrPluginShutdown @@ -330,6 +326,7 @@ func (c *gRPCClient) GenerateCredentials(ctx context.Context) (string, error) { return resp.Password, nil } + func (c *gRPCClient) SetCredentials(ctx context.Context, statements Statements, staticUser StaticUserConfig) (username, password string, err error) { ctx, 
cancel := context.WithCancel(ctx) quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) @@ -340,7 +337,6 @@ func (c *gRPCClient) SetCredentials(ctx context.Context, statements Statements, StaticUserConfig: &staticUser, Statements: &statements, }) - if err != nil { // Fall back to old call if not implemented grpcStatus, ok := status.FromError(err) diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/plugin.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/plugin.go index 79bc0637a8..6788e3379d 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/plugin.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/plugin.go @@ -146,8 +146,10 @@ var handshakeConfig = plugin.HandshakeConfig{ MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb", } -var _ plugin.Plugin = &GRPCDatabasePlugin{} -var _ plugin.GRPCPlugin = &GRPCDatabasePlugin{} +var ( + _ plugin.Plugin = &GRPCDatabasePlugin{} + _ plugin.GRPCPlugin = &GRPCDatabasePlugin{} +) // GRPCDatabasePlugin is the plugin.Plugin implementation that only supports GRPC // transport diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/server.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/server.go index 00e71e128c..4949384baf 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/server.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/server.go @@ -28,12 +28,12 @@ func ServeConfig(db Database, tlsProvider func() (*tls.Config, error)) *plugin.S // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. - 3: plugin.PluginSet{ + 3: { "database": &GRPCDatabasePlugin{ Impl: db, }, }, - 4: plugin.PluginSet{ + 4: { "database": &GRPCDatabasePlugin{ Impl: db, }, diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/grpc_database_plugin.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/grpc_database_plugin.go index 24468f72fc..96d296ad79 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/grpc_database_plugin.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/grpc_database_plugin.go @@ -25,8 +25,10 @@ type GRPCDatabasePlugin struct { plugin.NetRPCUnsupportedPlugin } -var _ plugin.Plugin = &GRPCDatabasePlugin{} -var _ plugin.GRPCPlugin = &GRPCDatabasePlugin{} +var ( + _ plugin.Plugin = &GRPCDatabasePlugin{} + _ plugin.GRPCPlugin = &GRPCDatabasePlugin{} +) func (d GRPCDatabasePlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { proto.RegisterDatabaseServer(s, gRPCServer{impl: d.Impl}) diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/plugin_client.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/plugin_client.go index e74abf9bcd..d2e0961104 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/plugin_client.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/plugin_client.go @@ -34,7 +34,7 @@ func (dc *DatabasePluginClient) Close() error { func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (Database, error) { // pluginSets is the map of plugins we can dispense. 
pluginSets := map[int]plugin.PluginSet{ - 5: plugin.PluginSet{ + 5: { "database": new(GRPCDatabasePlugin), }, } diff --git a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/plugin_server.go b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/plugin_server.go index d692b9b461..11d04e6450 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/plugin_server.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/dbplugin/v5/plugin_server.go @@ -23,7 +23,7 @@ func ServeConfig(db Database) *plugin.ServeConfig { // pluginSets is the map of plugins we can dispense. pluginSets := map[int]plugin.PluginSet{ - 5: plugin.PluginSet{ + 5: { "database": &GRPCDatabasePlugin{ Impl: db, }, diff --git a/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/connutil.go b/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/connutil.go index 35553d2261..1749b275a2 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/connutil.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/helper/connutil/connutil.go @@ -6,9 +6,7 @@ import ( "sync" ) -var ( - ErrNotInitialized = errors.New("connection has not been initialized") -) +var ErrNotInitialized = errors.New("connection has not been initialized") // ConnectionProducer can be used as an embedded interface in the Database // definition. It implements the methods dealing with individual database diff --git a/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/credsutil.go b/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/credsutil.go index 12b744fc43..d35d007bef 100644 --- a/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/credsutil.go +++ b/vendor/github.com/hashicorp/vault/sdk/database/helper/credsutil/credsutil.go @@ -2,9 +2,8 @@ package credsutil import ( "context" - "time" - "fmt" + "time" "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/helper/base62" diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/backend.go b/vendor/github.com/hashicorp/vault/sdk/framework/backend.go index 416d5d856f..0a0894fd01 100644 --- a/vendor/github.com/hashicorp/vault/sdk/framework/backend.go +++ b/vendor/github.com/hashicorp/vault/sdk/framework/backend.go @@ -164,7 +164,8 @@ func (b *Backend) HandleExistenceCheck(ctx context.Context, req *logical.Request fd := FieldData{ Raw: raw, - Schema: path.Fields} + Schema: path.Fields, + } err = fd.Validate() if err != nil { @@ -260,7 +261,8 @@ func (b *Backend) HandleRequest(ctx context.Context, req *logical.Request) (*log fd := FieldData{ Raw: raw, - Schema: path.Fields} + Schema: path.Fields, + } if req.Operation != logical.HelpOperation { err := fd.Validate() diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/filter.go b/vendor/github.com/hashicorp/vault/sdk/framework/filter.go index e042b53908..faaccba2a8 100644 --- a/vendor/github.com/hashicorp/vault/sdk/framework/filter.go +++ b/vendor/github.com/hashicorp/vault/sdk/framework/filter.go @@ -2,6 +2,7 @@ package framework import ( "context" + "github.com/hashicorp/vault/sdk/logical" "github.com/ryanuber/go-glob" ) @@ -31,4 +32,4 @@ func GlobListFilter(fieldName string, callback OperationFunc) OperationFunc { } return resp, nil } -} \ No newline at end of file +} diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/openapi.go b/vendor/github.com/hashicorp/vault/sdk/framework/openapi.go index 7791ba8fb2..02667cda4d 100644 --- a/vendor/github.com/hashicorp/vault/sdk/framework/openapi.go +++ 
b/vendor/github.com/hashicorp/vault/sdk/framework/openapi.go @@ -39,12 +39,10 @@ func NewOASDocument() *OASDocument { // If a document has been decoded from JSON or received from a plugin, it will be as a map[string]interface{} // and needs special handling beyond the default mapstructure decoding. func NewOASDocumentFromMap(input map[string]interface{}) (*OASDocument, error) { - // The Responses map uses integer keys (the response code), but once translated into JSON // (e.g. during the plugin transport) these become strings. mapstructure will not coerce these back // to integers without a custom decode hook. decodeHook := func(src reflect.Type, tgt reflect.Type, inputRaw interface{}) (interface{}, error) { - // Only alter data if: // 1. going from string to int // 2. string represent an int in status code range (100-599) @@ -165,7 +163,7 @@ type OASSchema struct { Default interface{} `json:"default,omitempty"` Example interface{} `json:"example,omitempty"` Deprecated bool `json:"deprecated,omitempty"` - //DisplayName string `json:"x-vault-displayName,omitempty" mapstructure:"x-vault-displayName,omitempty"` + // DisplayName string `json:"x-vault-displayName,omitempty" mapstructure:"x-vault-displayName,omitempty"` DisplayValue interface{} `json:"x-vault-displayValue,omitempty" mapstructure:"x-vault-displayValue,omitempty"` DisplaySensitive bool `json:"x-vault-displaySensitive,omitempty" mapstructure:"x-vault-displaySensitive,omitempty"` DisplayGroup string `json:"x-vault-displayGroup,omitempty" mapstructure:"x-vault-displayGroup,omitempty"` @@ -192,15 +190,17 @@ var OASStdRespNoContent = &OASResponse{ // Both "(leases/)?renew" and "(/(?P<name>.+))?" formats are detected var optRe = regexp.MustCompile(`(?U)\([^(]*\)\?|\(/\(\?P<[^(]*\)\)\?`) -var reqdRe = regexp.MustCompile(`\(?\?P<(\w+)>[^)]*\)?`) // Capture required parameters, e.g. "(?P<name>regex)" -var altRe = regexp.MustCompile(`\((.*)\|(.*)\)`) // Capture alternation elements, e.g. "(raw/?$|raw/(?P<path>.+))" -var pathFieldsRe = regexp.MustCompile(`{(\w+)}`) // Capture OpenAPI-style named parameters, e.g. "lookup/{urltoken}", -var cleanCharsRe = regexp.MustCompile("[()^$?]") // Set of regex characters that will be stripped during cleaning -var cleanSuffixRe = regexp.MustCompile(`/\?\$?$`) // Path suffix patterns that will be stripped during cleaning -var wsRe = regexp.MustCompile(`\s+`) // Match whitespace, to be compressed during cleaning -var altFieldsGroupRe = regexp.MustCompile(`\(\?P<\w+>\w+(\|\w+)+\)`) // Match named groups that limit options, e.g. "(?P<foo>a|b|c)" -var altFieldsRe = regexp.MustCompile(`\w+(\|\w+)+`) // Match an options set, e.g. "a|b|c" -var nonWordRe = regexp.MustCompile(`[^\w]+`) // Match a sequence of non-word characters +var ( + reqdRe = regexp.MustCompile(`\(?\?P<(\w+)>[^)]*\)?`) // Capture required parameters, e.g. "(?P<name>regex)" + altRe = regexp.MustCompile(`\((.*)\|(.*)\)`) // Capture alternation elements, e.g. "(raw/?$|raw/(?P<path>.+))" + pathFieldsRe = regexp.MustCompile(`{(\w+)}`) // Capture OpenAPI-style named parameters, e.g. "lookup/{urltoken}", + cleanCharsRe = regexp.MustCompile("[()^$?]") // Set of regex characters that will be stripped during cleaning + cleanSuffixRe = regexp.MustCompile(`/\?\$?$`) // Path suffix patterns that will be stripped during cleaning + wsRe = regexp.MustCompile(`\s+`) // Match whitespace, to be compressed during cleaning + altFieldsGroupRe = regexp.MustCompile(`\(\?P<\w+>\w+(\|\w+)+\)`) // Match named groups that limit options, e.g. 
"(?P<foo>a|b|c)" + altFieldsRe = regexp.MustCompile(`\w+(\|\w+)+`) // Match an options set, e.g. "a|b|c" + nonWordRe = regexp.MustCompile(`[^\w]+`) // Match a sequence of non-word characters +) // documentPaths parses all paths in a framework.Backend into OpenAPI paths. func documentPaths(backend *Backend, doc *OASDocument) error { diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/path.go b/vendor/github.com/hashicorp/vault/sdk/framework/path.go index 1339ca6af3..4dc8ca3033 100644 --- a/vendor/github.com/hashicorp/vault/sdk/framework/path.go +++ b/vendor/github.com/hashicorp/vault/sdk/framework/path.go @@ -274,7 +274,7 @@ func (p *Path) helpCallback(b *Backend) OperationFunc { // Alphabetize the fields fieldKeys := make([]string, 0, len(p.Fields)) - for k, _ := range p.Fields { + for k := range p.Fields { fieldKeys = append(fieldKeys, k) } sort.Strings(fieldKeys) diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/path_map.go b/vendor/github.com/hashicorp/vault/sdk/framework/path_map.go index 8e1b91864b..0cba8ea2fb 100644 --- a/vendor/github.com/hashicorp/vault/sdk/framework/path_map.go +++ b/vendor/github.com/hashicorp/vault/sdk/framework/path_map.go @@ -37,7 +37,7 @@ func (p *PathMap) init() { if p.Schema == nil { p.Schema = map[string]*FieldSchema{ - "value": &FieldSchema{ + "value": { Type: TypeString, Description: fmt.Sprintf("Value for %s mapping", p.Name), }, @@ -207,7 +207,7 @@ func (p *PathMap) Paths() []*Path { } return []*Path{ - &Path{ + { Pattern: fmt.Sprintf("%s/%s/?$", p.Prefix, p.Name), Callbacks: map[logical.Operation]OperationFunc{ @@ -218,7 +218,7 @@ func (p *PathMap) Paths() []*Path { HelpSynopsis: fmt.Sprintf("Read mappings for %s", p.Name), }, - &Path{ + { Pattern: fmt.Sprintf(`%s/%s/(?P<key>[-\w]+)`, p.Prefix, p.Name), Fields: schema, diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/policy_map.go b/vendor/github.com/hashicorp/vault/sdk/framework/policy_map.go index 7657b4b0a9..7befb39954 100644 --- a/vendor/github.com/hashicorp/vault/sdk/framework/policy_map.go +++ b/vendor/github.com/hashicorp/vault/sdk/framework/policy_map.go @@ -59,7 +59,7 @@ func (p *PolicyMap) Policies(ctx context.Context, s logical.Storage, names ...st } list := make([]string, 0, len(set)) - for k, _ := range set { + for k := range set { list = append(list, k) } sort.Strings(list) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/generate_credentials.go b/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/generate_credentials.go index 8cb8ab154c..1ff60d696b 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/generate_credentials.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/generate_credentials.go @@ -62,7 +62,8 @@ func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials, AccessKeyID: c.AccessKey, SecretAccessKey: c.SecretKey, SessionToken: c.SessionToken, - }}) + }, + }) c.log(hclog.Debug, "added static credential provider", "AccessKey", c.AccessKey) case c.AccessKey == "" && c.SecretKey == "": @@ -92,7 +93,7 @@ func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials, c.log(hclog.Warn, "error assuming role", "roleARN", roleARN, "tokenPath", tokenPath, "sessionName", sessionName, "err", err) } - //Add the web identity role credential provider + // Add the web identity role credential provider providers = append(providers, webIdentityProvider) } diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/base62/base62.go b/vendor/github.com/hashicorp/vault/sdk/helper/base62/base62.go 
index 57a76d4422..36c6bc9a2e 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/base62/base62.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/base62/base62.go @@ -9,8 +9,10 @@ import ( uuid "github.com/hashicorp/go-uuid" ) -const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" -const csLen = byte(len(charset)) +const ( + charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + csLen = byte(len(charset)) +) // Random generates a random string using base-62 characters. // Resulting entropy is ~5.95 bits/character. diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go index 12198798e8..c23cca994c 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go @@ -643,7 +643,7 @@ func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertB !bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId) { result.CAChain = []*CertBlock{ - &CertBlock{ + { Certificate: data.SigningBundle.Certificate, Bytes: data.SigningBundle.CertificateBytes, }, diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go index 1c7c190777..8a1a1d5fa9 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go @@ -51,7 +51,7 @@ type Secret struct { // names rather than official names, to eliminate confusion type PrivateKeyType string -//Well-known PrivateKeyTypes +// Well-known PrivateKeyTypes const ( UnknownPrivateKey PrivateKeyType = "" RSAPrivateKey PrivateKeyType = "rsa" @@ -63,24 +63,24 @@ const ( // client use, or both, which affects which values are set type TLSUsage int -//Well-known TLSUsage types +// Well-known TLSUsage types const ( TLSUnknown TLSUsage = 0 TLSServer TLSUsage = 1 << iota TLSClient ) -//BlockType indicates the serialization format of the key +// BlockType indicates the serialization format of the key type BlockType string -//Well-known formats +// Well-known formats const ( PKCS1Block BlockType = "RSA PRIVATE KEY" PKCS8Block BlockType = "PRIVATE KEY" ECBlock BlockType = "EC PRIVATE KEY" ) -//ParsedPrivateKeyContainer allows common key setting for certs and CSRs +// ParsedPrivateKeyContainer allows common key setting for certs and CSRs type ParsedPrivateKeyContainer interface { SetParsedPrivateKey(crypto.Signer, PrivateKeyType, []byte) } @@ -283,7 +283,7 @@ func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { block.Bytes = p.PrivateKeyBytes result.PrivateKeyType = p.PrivateKeyType - //Handle bundle not parsed by us + // Handle bundle not parsed by us if block.Type == "" { switch p.PrivateKeyType { case ECPrivateKey: diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go index 755ff66f80..a385e40768 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go @@ -1,9 +1,9 @@ package consts const ( - //N.B. This needs to be excluded from replication despite the name; it's - //merely saying that this is cluster information for the replicated - //cluster. + // N.B. 
This needs to be excluded from replication despite the name; it's + // merely saying that this is cluster information for the replicated + // cluster. CoreReplicatedClusterPrefix = "core/cluster/replicated/" CoreReplicatedClusterPrefixDR = "core/cluster/replicated-dr/" @@ -45,7 +45,6 @@ const ( // We verify no change to the above values are made func init() { - if OldReplicationBootstrapping != 3 { panic("Replication Constants have changed") } diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/dbtxn/dbtxn.go b/vendor/github.com/hashicorp/vault/sdk/helper/dbtxn/dbtxn.go index 3337bd97b2..fab9e942d7 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/dbtxn/dbtxn.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/dbtxn/dbtxn.go @@ -13,7 +13,6 @@ import ( // - config: Optional, may be nil // - query: Required func ExecuteDBQuery(ctx context.Context, db *sql.DB, params map[string]string, query string) error { - parsedQuery := parseQuery(params, query) stmt, err := db.PrepareContext(ctx, parsedQuery) @@ -31,7 +30,6 @@ func ExecuteDBQuery(ctx context.Context, db *sql.DB, params map[string]string, q // - config: Optional, may be nil // - query: Required func ExecuteTxQuery(ctx context.Context, tx *sql.Tx, params map[string]string, query string) error { - parsedQuery := parseQuery(params, query) stmt, err := tx.PrepareContext(ctx, parsedQuery) @@ -51,7 +49,6 @@ func execute(ctx context.Context, stmt *sql.Stmt) error { } func parseQuery(m map[string]string, tpl string) string { - if m == nil || len(m) <= 0 { return tpl } diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/identitytpl/templating.go b/vendor/github.com/hashicorp/vault/sdk/helper/identitytpl/templating.go index 3b742dc5bd..85166bf4f2 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/identitytpl/templating.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/identitytpl/templating.go @@ -164,7 +164,6 @@ func PopulateString(p PopulateStringInput) (bool, string, error) { } func performTemplating(input string, p *PopulateStringInput) (string, error) { - performAliasTemplating := func(trimmed string, alias *logical.Alias) (string, error) { switch { case trimmed == "id": diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/lock_manager.go b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/lock_manager.go index 3a796c58f5..039b05ad05 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/lock_manager.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/lock_manager.go @@ -22,9 +22,7 @@ const ( currentConvergentVersion = 3 ) -var ( - errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation") -) +var errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation") // PolicyRequest holds values used when requesting a policy. Most values are // only used during an upsert. 
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/policy.go b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/policy.go index f49dbe245c..ba1b437013 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/policy.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/keysutil/policy.go @@ -312,7 +312,7 @@ type Policy struct { deleted uint32 Name string `json:"name"` - Key []byte `json:"key,omitempty"` //DEPRECATED + Key []byte `json:"key,omitempty"` // DEPRECATED Keys keyEntryMap `json:"keys"` // Derived keys MUST provide a context and the master underlying key is diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go index 27578e3fd3..a8d30674b1 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go @@ -51,7 +51,6 @@ func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { // ParseLogFormat parses the log format from the provided string. func ParseLogFormat(format string) (LogFormat, error) { - switch strings.ToLower(strings.TrimSpace(format)) { case "": return UnspecifiedFormat, nil diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/policyutil/policyutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/policyutil/policyutil.go index 85beaf214d..1d6cc1df39 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/policyutil/policyutil.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/policyutil/policyutil.go @@ -107,10 +107,10 @@ func EquivalentPolicies(a, b []string) bool { // Now we'll build our checking slices var sortedA, sortedB []string - for keyA, _ := range mapA { + for keyA := range mapA { sortedA = append(sortedA, keyA) } - for keyB, _ := range mapB { + for keyB := range mapB { sortedB = append(sortedB, keyB) } sort.Strings(sortedA) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go index bc4dd1d733..29f9748344 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/tokenutil/tokenutil.go @@ -71,7 +71,7 @@ func AddTokenFieldsWithAllowList(m map[string]*framework.FieldSchema, allowed [] // TokenFields provides a set of field schemas for the parameters func TokenFields() map[string]*framework.FieldSchema { return map[string]*framework.FieldSchema{ - "token_bound_cidrs": &framework.FieldSchema{ + "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or JSON list of CIDR blocks. 
If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, DisplayAttrs: &framework.DisplayAttributes{ @@ -80,7 +80,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_explicit_max_ttl": &framework.FieldSchema{ + "token_explicit_max_ttl": { Type: framework.TypeDurationSecond, Description: tokenExplicitMaxTTLHelp, DisplayAttrs: &framework.DisplayAttributes{ @@ -89,7 +89,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_max_ttl": &framework.FieldSchema{ + "token_max_ttl": { Type: framework.TypeDurationSecond, Description: "The maximum lifetime of the generated token", DisplayAttrs: &framework.DisplayAttributes{ @@ -98,7 +98,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_no_default_policy": &framework.FieldSchema{ + "token_no_default_policy": { Type: framework.TypeBool, Description: "If true, the 'default' policy will not automatically be added to generated tokens", DisplayAttrs: &framework.DisplayAttributes{ @@ -107,7 +107,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_period": &framework.FieldSchema{ + "token_period": { Type: framework.TypeDurationSecond, Description: tokenPeriodHelp, DisplayAttrs: &framework.DisplayAttributes{ @@ -116,7 +116,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_policies": &framework.FieldSchema{ + "token_policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", DisplayAttrs: &framework.DisplayAttributes{ @@ -125,7 +125,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_type": &framework.FieldSchema{ + "token_type": { Type: framework.TypeString, Default: "default-service", Description: "The type of token to generate, service or batch", @@ -135,7 +135,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_ttl": &framework.FieldSchema{ + "token_ttl": { Type: framework.TypeDurationSecond, Description: "The initial ttl of the token to generate", DisplayAttrs: &framework.DisplayAttributes{ @@ -144,7 +144,7 @@ func TokenFields() map[string]*framework.FieldSchema { }, }, - "token_num_uses": &framework.FieldSchema{ + "token_num_uses": { Type: framework.TypeInt, Description: "The maximum number of times a token may be used, a value of zero means unlimited", DisplayAttrs: &framework.DisplayAttributes{ diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go index 682ecf714e..2cd07715c2 100644 --- a/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go +++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go @@ -11,9 +11,7 @@ type StorageView struct { prefix string } -var ( - ErrRelativePath = errors.New("relative paths not supported") -) +var ErrRelativePath = errors.New("relative paths not supported") func NewStorageView(storage Storage, prefix string) *StorageView { return &StorageView{ diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go index f489858442..52768776a6 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go @@ -69,10 +69,12 @@ type TransactionalCache struct { } // Verify Cache satisfies the correct interfaces -var _ ToggleablePurgemonster = (*Cache)(nil) -var _ ToggleablePurgemonster = (*TransactionalCache)(nil) -var _ Backend = (*Cache)(nil) -var _ Transactional = 
(*TransactionalCache)(nil) +var ( + _ ToggleablePurgemonster = (*Cache)(nil) + _ ToggleablePurgemonster = (*TransactionalCache)(nil) + _ Backend = (*Cache)(nil) + _ Transactional = (*TransactionalCache)(nil) +) // NewCache returns a physical cache of the given size. // If no size is provided, the default size is used. diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go index d2f93478b0..dbde84cc6d 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go @@ -8,8 +8,10 @@ import ( "unicode/utf8" ) -var ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters") -var ErrNonPrintable = errors.New("key contains non-printable characters") +var ( + ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters") + ErrNonPrintable = errors.New("key contains non-printable characters") +) // StorageEncoding is used to add errors into underlying physical requests type StorageEncoding struct { @@ -24,8 +26,10 @@ type TransactionalStorageEncoding struct { } // Verify StorageEncoding satisfies the correct interfaces -var _ Backend = (*StorageEncoding)(nil) -var _ Transactional = (*TransactionalStorageEncoding)(nil) +var ( + _ Backend = (*StorageEncoding)(nil) + _ Transactional = (*TransactionalStorageEncoding)(nil) +) // NewStorageEncoding returns a wrapped physical backend and verifies the key // encoding diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/error.go b/vendor/github.com/hashicorp/vault/sdk/physical/error.go index 8091f178bc..b547e4e428 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/error.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/error.go @@ -31,8 +31,10 @@ type TransactionalErrorInjector struct { } // Verify ErrorInjector satisfies the correct interfaces -var _ Backend = (*ErrorInjector)(nil) -var _ Transactional = (*TransactionalErrorInjector)(nil) +var ( + _ Backend = (*ErrorInjector)(nil) + _ Transactional = (*TransactionalErrorInjector)(nil) +) // NewErrorInjector returns a wrapped physical backend to inject error func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector { diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/file/file.go b/vendor/github.com/hashicorp/vault/sdk/physical/file/file.go index d08d1c2b67..320ee21caa 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/file/file.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/file/file.go @@ -21,9 +21,11 @@ import ( ) // Verify FileBackend satisfies the correct interfaces -var _ physical.Backend = (*FileBackend)(nil) -var _ physical.Transactional = (*TransactionalFileBackend)(nil) -var _ physical.PseudoTransactional = (*FileBackend)(nil) +var ( + _ physical.Backend = (*FileBackend)(nil) + _ physical.Transactional = (*TransactionalFileBackend)(nil) + _ physical.PseudoTransactional = (*FileBackend)(nil) +) // FileBackend is a physical backend that stores data on disk // at a given file path. 
It can be used for durable single server @@ -234,7 +236,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er } // Make the parent tree - if err := os.MkdirAll(path, 0700); err != nil { + if err := os.MkdirAll(path, 0o700); err != nil { return err } @@ -243,7 +245,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er f, err := os.OpenFile( fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, - 0600) + 0o600) if err != nil { if f != nil { f.Close() diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go index 9739a7587a..b366eb84bf 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go @@ -17,12 +17,14 @@ import ( ) // Verify interfaces are satisfied -var _ physical.Backend = (*InmemBackend)(nil) -var _ physical.HABackend = (*InmemHABackend)(nil) -var _ physical.HABackend = (*TransactionalInmemHABackend)(nil) -var _ physical.Lock = (*InmemLock)(nil) -var _ physical.Transactional = (*TransactionalInmemBackend)(nil) -var _ physical.Transactional = (*TransactionalInmemHABackend)(nil) +var ( + _ physical.Backend = (*InmemBackend)(nil) + _ physical.HABackend = (*InmemHABackend)(nil) + _ physical.HABackend = (*TransactionalInmemHABackend)(nil) + _ physical.Lock = (*InmemLock)(nil) + _ physical.Transactional = (*TransactionalInmemBackend)(nil) + _ physical.Transactional = (*TransactionalInmemHABackend)(nil) +) var ( PutDisabledError = errors.New("put operations disabled in inmem backend") diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/latency.go b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go index 51bb560c2f..11b413c4d0 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/latency.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go @@ -32,8 +32,10 @@ type TransactionalLatencyInjector struct { } // Verify LatencyInjector satisfies the correct interfaces -var _ Backend = (*LatencyInjector)(nil) -var _ Transactional = (*TransactionalLatencyInjector)(nil) +var ( + _ Backend = (*LatencyInjector)(nil) + _ Transactional = (*TransactionalLatencyInjector)(nil) +) // NewLatencyInjector returns a wrapped physical backend to simulate latency func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector { diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go index d891481983..189ac93172 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go @@ -6,9 +6,7 @@ import ( "strings" ) -var ( - ErrRelativePath = errors.New("relative paths not supported") -) +var ErrRelativePath = errors.New("relative paths not supported") // View represents a prefixed view of a physical backend type View struct { diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/testing.go b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go index 0970b8694f..6e0ddfcc0e 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/testing.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go @@ -458,33 +458,33 @@ func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { } txns := []*TxnEntry{ - &TxnEntry{ + { Operation: PutOperation, Entry: &Entry{ Key: "foo", Value: []byte("bar2"), }, }, - &TxnEntry{ + { Operation: 
DeleteOperation, Entry: &Entry{ Key: "deleteme", }, }, - &TxnEntry{ + { Operation: PutOperation, Entry: &Entry{ Key: "foo", Value: []byte("bar3"), }, }, - &TxnEntry{ + { Operation: DeleteOperation, Entry: &Entry{ Key: "deleteme2", }, }, - &TxnEntry{ + { Operation: PutOperation, Entry: &Entry{ Key: "zip", diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/backend.go b/vendor/github.com/hashicorp/vault/sdk/plugin/backend.go index 17932efe40..82c7287327 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/backend.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/backend.go @@ -12,8 +12,10 @@ import ( "github.com/hashicorp/vault/sdk/plugin/pb" ) -var _ plugin.Plugin = (*GRPCBackendPlugin)(nil) -var _ plugin.GRPCPlugin = (*GRPCBackendPlugin)(nil) +var ( + _ plugin.Plugin = (*GRPCBackendPlugin)(nil) + _ plugin.GRPCPlugin = (*GRPCBackendPlugin)(nil) +) // GRPCBackendPlugin is the plugin.Plugin implementation that only supports GRPC // transport diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_client.go b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_client.go index 4bb9a2a4b3..9ea3c23f8c 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_client.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/grpc_backend_client.go @@ -17,8 +17,10 @@ import ( "github.com/hashicorp/vault/sdk/plugin/pb" ) -var ErrPluginShutdown = errors.New("plugin is shut down") -var ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode") +var ( + ErrPluginShutdown = errors.New("plugin is shut down") + ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode") +) // Validate backendGRPCPluginClient satisfies the logical.Backend interface var _ logical.Backend = &backendGRPCPluginClient{} diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/logger.go b/vendor/github.com/hashicorp/vault/sdk/plugin/logger.go index a59a8a3da2..ecf6ed01f1 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/logger.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/logger.go @@ -32,7 +32,6 @@ func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error { } func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error { - switch translateLevel(args.Level) { case hclog.Trace: @@ -107,7 +106,6 @@ type LoggerReply struct { } func translateLevel(logxiLevel int) hclog.Level { - switch logxiLevel { case 1000, 10: diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_errors.go b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_errors.go index 16df5cc25c..05ef474a7e 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_errors.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_errors.go @@ -15,22 +15,22 @@ import ( // it is used to test the invalidate func. 
func errorPaths(b *backend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: "errors/rpc", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathErrorRPCRead, }, }, - &framework.Path{ + { Pattern: "errors/kill", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathErrorRPCRead, }, }, - &framework.Path{ + { Pattern: "errors/type", Fields: map[string]*framework.FieldSchema{ - "err_type": &framework.FieldSchema{Type: framework.TypeInt}, + "err_type": {Type: framework.TypeInt}, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.CreateOperation: b.pathErrorRPCRead, @@ -71,5 +71,4 @@ func (b *backend) pathErrorRPCRead(ctx context.Context, req *logical.Request, da } return nil, err - } diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_internal.go b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_internal.go index 30bee271a0..26ede270fa 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_internal.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_internal.go @@ -13,7 +13,7 @@ func pathInternal(b *backend) *framework.Path { return &framework.Path{ Pattern: "internal", Fields: map[string]*framework.FieldSchema{ - "value": &framework.FieldSchema{Type: framework.TypeString}, + "value": {Type: framework.TypeString}, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathInternalUpdate, diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_kv.go b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_kv.go index efafe7b2cc..1946b57624 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_kv.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_kv.go @@ -12,18 +12,18 @@ import ( // version of the passthrough backend that only accepts string values. 
func kvPaths(b *backend) []*framework.Path { return []*framework.Path{ - &framework.Path{ + { Pattern: "kv/?", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathKVList, }, }, - &framework.Path{ + { Pattern: "kv/" + framework.GenericNameRegex("key"), Fields: map[string]*framework.FieldSchema{ - "key": &framework.FieldSchema{Type: framework.TypeString}, - "value": &framework.FieldSchema{Type: framework.TypeString}, - "version": &framework.FieldSchema{Type: framework.TypeInt}, + "key": {Type: framework.TypeString}, + "value": {Type: framework.TypeString}, + "version": {Type: framework.TypeInt}, }, ExistenceCheck: b.pathExistenceCheck, Callbacks: map[logical.Operation]framework.OperationFunc{ diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_raw.go b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_raw.go index 41631ddac3..55cb7c9374 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_raw.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_raw.go @@ -25,5 +25,4 @@ func (b *backend) pathRawRead(ctx context.Context, req *logical.Request, data *f logical.HTTPStatusCode: 200, }, }, nil - } diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_special.go b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_special.go index 626dd22215..22afa41c6d 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_special.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/mock/path_special.go @@ -24,5 +24,4 @@ func (b *backend) pathSpecialRead(ctx context.Context, req *logical.Request, dat "data": "foo", }, }, nil - } diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/plugin.go b/vendor/github.com/hashicorp/vault/sdk/plugin/plugin.go index f12a36928c..f4f2d8e18f 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/plugin.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/plugin.go @@ -76,12 +76,12 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. - 3: plugin.PluginSet{ + 3: { "backend": &GRPCBackendPlugin{ MetadataMode: isMetadataMode, }, }, - 4: plugin.PluginSet{ + 4: { "backend": &GRPCBackendPlugin{ MetadataMode: isMetadataMode, }, diff --git a/vendor/github.com/hashicorp/vault/sdk/plugin/serve.go b/vendor/github.com/hashicorp/vault/sdk/plugin/serve.go index b8cd3e58f0..1119a2dac6 100644 --- a/vendor/github.com/hashicorp/vault/sdk/plugin/serve.go +++ b/vendor/github.com/hashicorp/vault/sdk/plugin/serve.go @@ -43,13 +43,13 @@ func Serve(opts *ServeOpts) error { // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. 
- 3: plugin.PluginSet{ + 3: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, Logger: logger, }, }, - 4: plugin.PluginSet{ + 4: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, Logger: logger, diff --git a/vendor/github.com/hashicorp/vault/sdk/testing/stepwise/environments/docker/environment.go b/vendor/github.com/hashicorp/vault/sdk/testing/stepwise/environments/docker/environment.go index 7cc0191745..18c93b3775 100644 --- a/vendor/github.com/hashicorp/vault/sdk/testing/stepwise/environments/docker/environment.go +++ b/vendor/github.com/hashicorp/vault/sdk/testing/stepwise/environments/docker/environment.go @@ -90,7 +90,7 @@ func (dc *DockerCluster) Teardown() error { } } - //clean up networks + // clean up networks if dc.networkID != "" { cli, err := docker.NewClientWithOpts(docker.FromEnv, docker.WithVersion(dockerVersion)) if err != nil { @@ -349,7 +349,7 @@ func (dc *DockerCluster) setupCA(opts *DockerClusterOptions) error { dc.CACertPEM = pem.EncodeToMemory(CACertPEMBlock) dc.CACertPEMFile = filepath.Join(dc.tmpDir, "ca", "ca.pem") - err = ioutil.WriteFile(dc.CACertPEMFile, dc.CACertPEM, 0755) + err = ioutil.WriteFile(dc.CACertPEMFile, dc.CACertPEM, 0o755) if err != nil { return err } @@ -415,13 +415,13 @@ func (n *dockerClusterNode) setupCert() error { }) n.ServerCertPEMFile = filepath.Join(n.WorkDir, "cert.pem") - err = ioutil.WriteFile(n.ServerCertPEMFile, n.ServerCertPEM, 0755) + err = ioutil.WriteFile(n.ServerCertPEMFile, n.ServerCertPEM, 0o755) if err != nil { return err } n.ServerKeyPEMFile = filepath.Join(n.WorkDir, "key.pem") - err = ioutil.WriteFile(n.ServerKeyPEMFile, n.ServerKeyPEM, 0755) + err = ioutil.WriteFile(n.ServerKeyPEMFile, n.ServerKeyPEM, 0o755) if err != nil { return err } @@ -573,7 +573,7 @@ func (n *dockerClusterNode) start(cli *docker.Client, caDir, netName string, net return err } - err = ioutil.WriteFile(filepath.Join(n.WorkDir, "local.json"), cfgJSON, 0644) + err = ioutil.WriteFile(filepath.Join(n.WorkDir, "local.json"), cfgJSON, 0o644) if err != nil { return err } @@ -693,7 +693,7 @@ var DefaultNumCores = 1 func (cluster *DockerCluster) setupDockerCluster(opts *DockerClusterOptions) error { if opts != nil && opts.tmpDir != "" { if _, err := os.Stat(opts.tmpDir); os.IsNotExist(err) { - if err := os.MkdirAll(opts.tmpDir, 0700); err != nil { + if err := os.MkdirAll(opts.tmpDir, 0o700); err != nil { return err } } @@ -706,7 +706,7 @@ func (cluster *DockerCluster) setupDockerCluster(opts *DockerClusterOptions) err cluster.tmpDir = tempDir } caDir := filepath.Join(cluster.tmpDir, "ca") - if err := os.MkdirAll(caDir, 0755); err != nil { + if err := os.MkdirAll(caDir, 0o755); err != nil { return err } @@ -729,7 +729,7 @@ func (cluster *DockerCluster) setupDockerCluster(opts *DockerClusterOptions) err WorkDir: filepath.Join(cluster.tmpDir, nodeID), } cluster.ClusterNodes = append(cluster.ClusterNodes, node) - if err := os.MkdirAll(node.WorkDir, 0700); err != nil { + if err := os.MkdirAll(node.WorkDir, 0o700); err != nil { return err } } diff --git a/vendor/github.com/hashicorp/vault/sdk/testing/stepwise/helpers.go b/vendor/github.com/hashicorp/vault/sdk/testing/stepwise/helpers.go index 2a48c6834b..81f173990e 100644 --- a/vendor/github.com/hashicorp/vault/sdk/testing/stepwise/helpers.go +++ b/vendor/github.com/hashicorp/vault/sdk/testing/stepwise/helpers.go @@ -8,7 +8,6 @@ import ( "encoding/pem" "errors" "fmt" - "github.com/hashicorp/errwrap" "io" "io/ioutil" "os" @@ -16,6 +15,8 @@ import ( "path" "strings" "sync" + + 
"github.com/hashicorp/errwrap" ) const pluginPrefix = "vault-plugin-" diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 6cd37280a8..c1c5263c42 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -97,6 +97,7 @@ package module import ( "fmt" + "path" "sort" "strings" "unicode" @@ -224,13 +225,13 @@ func firstPathOK(r rune) bool { } // pathOK reports whether r can appear in an import path element. -// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. // This matches what "go get" has historically recognized in import paths. // TODO(rsc): We would like to allow Unicode letters, but that requires additional // care in the safe encoding (see "escaped paths" above). func pathOK(r rune) bool { if r < utf8.RuneSelf { - return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + return r == '-' || r == '.' || r == '_' || r == '~' || '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' @@ -313,11 +314,13 @@ func CheckPath(path string) error { // separated by slashes (U+002F). (It must not begin with nor end in a slash.) // // A valid path element is a non-empty string made up of -// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. // It must not begin or end with a dot (U+002E), nor contain two dots in a row. // // The element prefix up to the first dot must not be a reserved file name -// on Windows, regardless of case (CON, com1, NuL, and so on). +// on Windows, regardless of case (CON, com1, NuL, and so on). The element +// must not have a suffix of a tilde followed by one or more ASCII digits +// (to exclude paths elements that look like Windows short-names). // // CheckImportPath may be less restrictive in the future, but see the // top-level package documentation for additional information about @@ -402,6 +405,29 @@ func checkElem(elem string, fileName bool) error { return fmt.Errorf("%q disallowed as path element component on Windows", short) } } + + if fileName { + // don't check for Windows short-names in file names. They're + // only an issue for import paths. + return nil + } + + // Reject path components that look like Windows short-names. + // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + return nil } @@ -716,3 +742,49 @@ func unescapeString(escaped string) (string, bool) { } return string(buf), true } + +// MatchPrefixPatterns reports whether any path prefix of target matches one of +// the glob patterns (as defined by path.Match) in the comma-separated globs +// list. This implements the algorithm used when matching a module path to the +// GOPRIVATE environment variable, as described by 'go help module-private'. +// +// It ignores any empty or malformed patterns in the list. +func MatchPrefixPatterns(globs, target string) bool { + for globs != "" { + // Extract next non-empty glob in comma-separated list. 
+ var glob string + if i := strings.Index(globs, ","); i >= 0 { + glob, globs = globs[:i], globs[i+1:] + } else { + glob, globs = globs, "" + } + if glob == "" { + continue + } + + // A glob with N+1 path elements (N slashes) needs to be matched + // against the first N+1 path elements of target, + // which end just before the N+1'th slash. + n := strings.Count(glob, "/") + prefix := target + // Walk target, counting slashes, truncating at the N+1'th slash. + for i := 0; i < len(target); i++ { + if target[i] == '/' { + if n == 0 { + prefix = target[:i] + break + } + n-- + } + } + if n > 0 { + // Not enough prefix elements. + continue + } + matched, _ := path.Match(glob, prefix) + if matched { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 2988e3cf9c..4338f35177 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -138,6 +138,9 @@ func Compare(v, w string) int { // Max canonicalizes its arguments and then returns the version string // that compares greater. +// +// Deprecated: use Compare instead. In most cases, returning a canonicalized +// version is not expected or desired. func Max(v, w string) string { v = Canonical(v) w = Canonical(w) diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go index f38b17247b..27708972d1 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -10,7 +10,6 @@ import ( "errors" "flag" "fmt" - "go/build" "go/scanner" "io" "io/ioutil" @@ -22,6 +21,7 @@ import ( "runtime/pprof" "strings" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/imports" ) @@ -43,14 +43,8 @@ var ( TabIndent: true, Comments: true, Fragment: true, - // This environment, and its caches, will be reused for the whole run. Env: &imports.ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - GOFLAGS: os.Getenv("GOFLAGS"), - GO111MODULE: os.Getenv("GO111MODULE"), - GOPROXY: os.Getenv("GOPROXY"), - GOSUMDB: os.Getenv("GOSUMDB"), + GocmdRunner: &gocommand.Runner{}, }, } exitCode = 0 @@ -58,7 +52,7 @@ var ( func init() { flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)") - flag.StringVar(&options.Env.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") + flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. 
In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.") } @@ -154,7 +148,6 @@ func processFile(filename string, in io.Reader, out io.Writer, argType argumentT // formatting has changed if *list { fmt.Fprintln(out, filename) - exitCode = 1 } if *write { if argType == fromStdin { diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index 8c9977355c..8c3c2e7ab9 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -95,12 +95,13 @@ type Pass struct { Analyzer *Analyzer // the identity of the current analyzer // syntax and type information - Fset *token.FileSet // file position information - Files []*ast.File // the abstract syntax tree of each file - OtherFiles []string // names of non-Go files of this package - Pkg *types.Package // type information about the package - TypesInfo *types.Info // type information about the syntax trees - TypesSizes types.Sizes // function for computing sizes of types + Fset *token.FileSet // file position information + Files []*ast.File // the abstract syntax tree of each file + OtherFiles []string // names of non-Go files of this package + IgnoredFiles []string // names of ignored source files in this package + Pkg *types.Package // type information about the package + TypesInfo *types.Info // type information about the syntax trees + TypesSizes types.Sizes // function for computing sizes of types // Report reports a Diagnostic, a finding about a specific location // in the analyzed source code such as a potential mistake. diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go index ea56b724e8..9fa3302dfb 100644 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -121,13 +121,14 @@ package being analyzed, and provides operations to the Run function for reporting diagnostics and other information back to the driver. type Pass struct { - Fset *token.FileSet - Files []*ast.File - OtherFiles []string - Pkg *types.Package - TypesInfo *types.Info - ResultOf map[*Analyzer]interface{} - Report func(Diagnostic) + Fset *token.FileSet + Files []*ast.File + OtherFiles []string + IgnoredFiles []string + Pkg *types.Package + TypesInfo *types.Info + ResultOf map[*Analyzer]interface{} + Report func(Diagnostic) ... } @@ -139,6 +140,12 @@ files such as assembly that are part of this package. See the "asmdecl" or "buildtags" analyzers for examples of loading non-Go files and reporting diagnostics against them. +The IgnoredFiles field provides the names, but not the contents, +of ignored Go and non-Go source files that are not part of this package +with the current build configuration but may be part of other build +configurations. See the "buildtags" analyzer for an example of loading +and checking IgnoredFiles. + The ResultOf field provides the results computed by the analyzers required by this one, as expressed in its Analyzer.Requires field. The driver runs the required analyzers first and makes their results @@ -170,6 +177,15 @@ Diagnostic is defined as: The optional Category field is a short identifier that classifies the kind of message when an analysis produces several kinds of diagnostic. +Many analyses want to associate diagnostics with a severity level. +Because Diagnostic does not have a severity level field, an Analyzer's +diagnostics effectively all have the same severity level. 
To separate which +diagnostics are high severity and which are low severity, expose multiple +Analyzers instead. Analyzers should also be separated when their +diagnostics belong in different groups, or could be tagged differently +before being shown to the end user. Analyzers should document their severity +level to help downstream tools surface diagnostics properly. + Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl and buildtag, inspect the raw text of Go source files or even non-Go files such as assembly. To report a diagnostic against a line of a diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go index be98143461..ad0e7276c9 100644 --- a/vendor/golang.org/x/tools/go/analysis/validate.go +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -3,6 +3,7 @@ package analysis import ( "fmt" "reflect" + "strings" "unicode" ) @@ -58,14 +59,28 @@ func Validate(analyzers []*Analyzer) error { } // recursion - for i, req := range a.Requires { + for _, req := range a.Requires { if err := visit(req); err != nil { - return fmt.Errorf("%s.Requires[%d]: %v", a.Name, i, err) + return err } } color[a] = black } + if color[a] == grey { + stack := []*Analyzer{a} + inCycle := map[string]bool{} + for len(stack) > 0 { + current := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if color[current] == grey && !inCycle[current.Name] { + inCycle[current.Name] = true + stack = append(stack, current.Requires...) + } + } + return &CycleInRequiresGraphError{AnalyzerNames: inCycle} + } + return nil } for _, a := range analyzers { @@ -95,3 +110,17 @@ func validIdent(name string) bool { } return name != "" } + +type CycleInRequiresGraphError struct { + AnalyzerNames map[string]bool +} + +func (e *CycleInRequiresGraphError) Error() string { + var b strings.Builder + b.WriteString("cycle detected involving the following analyzers:") + for n := range e.AnalyzerNames { + b.WriteByte(' ') + b.WriteString(n) + } + return b.String() +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 0d51acad99..f8363d8faa 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -85,11 +85,15 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) } - // The indexed export format starts with an 'i'. - if len(data) == 0 || data[0] != 'i' { - return nil, fmt.Errorf("unknown export data format") + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err } - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) return pkg, err } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 0000000000..a807d0aaa2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding +const exportVersion = 4 + +// trackAllTypes enables cycle tracking for all types, not just named +// types. The existing compiler invariants assume that unnamed types +// that are not completely set up are not used, or else there are spurious +// errors. +// If disabled, only named types are tracked, possibly leading to slightly +// less efficient encoding in rare cases. It also prevents the export of +// some corner-case type declarations (but those are not handled correctly +// with with the textual export format either). +// TODO(gri) enable and remove once issues caused by it are fixed +const trackAllTypes = false + +type exporter struct { + fset *token.FileSet + out bytes.Buffer + + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*types.Package]int + typIndex map[types.Type]int + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + + // debugging support + written int // bytes written + indent int // for trace +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} + +// BExportData returns binary export data for pkg. +// If no file set is provided, position info will be missing. 
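The comments above describe the binary export format that BExportData emits and that BImportData, added later in this vendored file set, consumes. As a point of reference only, here is a minimal sketch of producing and re-reading export data through the public golang.org/x/tools/go/gcexportdata wrapper, whose Read dispatch is adjusted earlier in this patch; the example.com/p path and the Answer constant are invented for the example, and the wrapper writes the indexed flavor, which Read recognizes by its leading 'i'.

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Type-check a tiny package entirely in memory.
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", "package p\n\nconst Answer = 42\n", 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("example.com/p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}

	// Write the package's export data, then read it back. Read inspects the
	// leading byte of the payload to choose between the indexed ('i') and the
	// older binary ('c', 'd', 'v') importers.
	var buf bytes.Buffer
	if err := gcexportdata.Write(&buf, fset, pkg); err != nil {
		panic(err)
	}
	imports := make(map[string]*types.Package)
	pkg2, err := gcexportdata.Read(&buf, token.NewFileSet(), imports, "example.com/p")
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg2.Path(), pkg2.Scope().Lookup("Answer").Type()) // example.com/p untyped int
}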
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := exporter{ + fset: fset, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 + pkgIndex: make(map[*types.Package]int), + typIndex: make(map[types.Type]int), + posInfoFormat: true, // TODO(gri) might become a flag, eventually + } + + // write version info + // The version string must start with "version %d" where %d is the version + // number. Additional debugging information may follow after a blank; that + // text is ignored by the importer. + p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with 
previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". + n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. + + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). 
+ var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. + p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" 
to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. 
+ bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. +// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". 
") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 0000000000..e9f73d14a1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1039 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). 
Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) + sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. 
+func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. + if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line, 0) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. + const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. 
+ // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. +func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? 
+ params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. + n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // 
anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. + var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := 
[]byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. 
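The rawByte decoder that follows, together with its encoding counterpart in bexport.go, hides '$' inside the payload so that tools can locate the "$$" end-of-export-data marker with a simple scan. A minimal standalone sketch of that escaping, with invented helper names, is:

package main

import (
	"bytes"
	"fmt"
)

// escape applies the rawByte scheme: '$' becomes "|S" and '|' becomes "||".
func escape(data []byte) []byte {
	var out []byte
	for _, b := range data {
		switch b {
		case '$':
			out = append(out, '|', 'S')
		case '|':
			out = append(out, '|', '|')
		default:
			out = append(out, b)
		}
	}
	return out
}

// unescape reverses escape, mirroring importer.rawByte.
func unescape(data []byte) []byte {
	var out []byte
	for i := 0; i < len(data); i++ {
		b := data[i]
		if b == '|' && i+1 < len(data) {
			i++
			if data[i] == 'S' {
				b = '$'
			} // an escaped "||" stays '|'
		}
		out = append(out, b)
	}
	return out
}

func main() {
	payload := []byte("cost=$5|flag")
	enc := escape(payload)
	fmt.Println(bytes.Contains(enc, []byte("$$")))   // false: the terminator cannot appear in the payload
	fmt.Println(bytes.Equal(unescape(enc), payload)) // true: the scheme round-trips
}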
+func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. +const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 6a9265ea94..e8cba6b237 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -204,11 +204,14 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'. - if len(data) == 0 || data[0] != 'i' { - return nil, fmt.Errorf("unknown export data format") + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. 
+ if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) } - _, pkg, err = IImportData(fset, packages, data[1:], id) default: err = fmt.Errorf("unknown export data header: %q", hdr) @@ -488,7 +491,7 @@ func (p *parser) parseMapType(parent *types.Package) types.Type { // // For unqualified and anonymous names, the returned package is the parent // package unless parent == nil, in which case the returned package is the -// package being imported. (The parent package is not nil if the the name +// package being imported. (The parent package is not nil if the name // is an unqualified struct field or interface method name belonging to a // type declared in another package.) // diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go index 858eb9f456..4be32a2e55 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -11,7 +11,6 @@ package gcimporter import ( "bytes" "encoding/binary" - "fmt" "go/ast" "go/constant" "go/token" @@ -26,15 +25,6 @@ import ( // 0: Go1.11 encoding const iexportVersion = 0 -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - // IExportData returns the binary export data for pkg. // // If no file set is provided, position info will be missing. @@ -538,16 +528,6 @@ func constantToFloat(x constant.Value) *big.Float { return &f } -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. - bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go index fef8b30080..a31a880263 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -18,9 +18,6 @@ import ( "go/types" "io" "sort" - "sync" - "unicode" - "unicode/utf8" ) type intReader struct { @@ -28,10 +25,6 @@ type intReader struct { path string } -func errorf(format string, args ...interface{}) { - panic(fmt.Sprintf(format, args...)) -} - func (r *intReader) int64() int64 { i, err := binary.ReadVarint(r.Reader) if err != nil { @@ -635,166 +628,3 @@ func (r *importReader) byte() byte { } return x } - -const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go - -// Synthesize a token.Pos -type fakeFileSet struct { - fset *token.FileSet - files map[string]*token.File -} - -func (s *fakeFileSet) pos(file string, line, column int) token.Pos { - // TODO(mdempsky): Make use of column. - - // Since we don't know the set of needed file positions, we - // reserve maxlines positions per file. - const maxlines = 64 * 1024 - f := s.files[file] - if f == nil { - f = s.fset.AddFile(file, -1, maxlines) - s.files[file] = f - // Allocate the fake linebreak indices on first use. 
- // TODO(adonovan): opt: save ~512KB using a more complex scheme? - fakeLinesOnce.Do(func() { - fakeLines = make([]int, maxlines) - for i := range fakeLines { - fakeLines[i] = i - } - }) - f.SetLines(fakeLines) - } - - if line > maxlines { - line = 1 - } - - // Treat the file as if it contained only newlines - // and column=1: use the line number as the offset. - return f.Pos(line - 1) -} - -var ( - fakeLines []int - fakeLinesOnce sync.Once -) - -func chanDir(d int) types.ChanDir { - // tag values must match the constants in cmd/compile/internal/gc/go.go - switch d { - case 1 /* Crecv */ : - return types.RecvOnly - case 2 /* Csend */ : - return types.SendOnly - case 3 /* Cboth */ : - return types.SendRecv - default: - errorf("unexpected channel dir %d", d) - return 0 - } -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. -const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index dc6177c122..f4d73b2339 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -6,12 +6,9 @@ package packagesdriver import ( - "bytes" "context" - "encoding/json" "fmt" "go/types" - "os/exec" "strings" "golang.org/x/tools/internal/gocommand" @@ -19,82 
+16,17 @@ import ( var debug = false -func GetSizes(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) { - // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver. - const toolPrefix = "GOPACKAGESDRIVER=" - tool := "" - for _, env := range env { - if val := strings.TrimPrefix(env, toolPrefix); val != env { - tool = val - } - } - - if tool == "" { - var err error - tool, err = exec.LookPath("gopackagesdriver") - if err != nil { - // We did not find the driver, so use "go list". - tool = "off" - } - } - - if tool == "off" { - return GetSizesGolist(ctx, buildFlags, env, gocmdRunner, dir) - } - - req, err := json.Marshal(struct { - Command string `json:"command"` - Env []string `json:"env"` - BuildFlags []string `json:"build_flags"` - }{ - Command: "sizes", - Env: env, - BuildFlags: buildFlags, - }) - if err != nil { - return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) - } - - buf := new(bytes.Buffer) - cmd := exec.CommandContext(ctx, tool) - cmd.Dir = dir - cmd.Env = env - cmd.Stdin = bytes.NewReader(req) - cmd.Stdout = buf - cmd.Stderr = new(bytes.Buffer) - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) - } - var response struct { - // Sizes, if not nil, is the types.Sizes to use when type checking. - Sizes *types.StdSizes - } - if err := json.Unmarshal(buf.Bytes(), &response); err != nil { - return nil, err - } - return response.Sizes, nil -} - -func GetSizesGolist(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) { - inv := gocommand.Invocation{ - Verb: "list", - Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}, - Env: env, - BuildFlags: buildFlags, - WorkingDir: dir, - } +func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string if rawErr != nil { if strings.Contains(rawErr.Error(), "cannot find main module") { // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? 
- inv := gocommand.Invocation{ - Verb: "env", - Args: []string{"GOARCH"}, - Env: env, - WorkingDir: dir, - } + inv.Verb = "env" + inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { return nil, enverr diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 8c8473fd0b..7db1d1293a 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -89,7 +89,7 @@ func findExternalDriver(cfg *Config) driver { return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) } if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) } var response driverResponse diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index cb6b14c1b9..c83ca097a9 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -10,6 +10,7 @@ import ( "encoding/json" "fmt" "go/types" + "io/ioutil" "log" "os" "os/exec" @@ -89,6 +90,10 @@ type golistState struct { rootDirsError error rootDirs map[string]string + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + // vendorDirs caches the (non)existence of vendor directories. vendorDirs map[string]bool } @@ -135,6 +140,12 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { response := newDeduper() + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + // Fill in response.Sizes asynchronously if necessary. var sizeserr error var sizeswg sync.WaitGroup @@ -142,19 +153,13 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { sizeswg.Add(1) go func() { var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.gocmdRunner, cfg.Dir) + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) // types.SizesFor always returns nil or a *types.StdSizes. response.dr.Sizes, _ = sizes.(*types.StdSizes) sizeswg.Done() }() } - state := &golistState{ - cfg: cfg, - ctx: ctx, - vendorDirs: map[string]bool{}, - } - // Determine files requested in contains patterns var containFiles []string restPatterns := make([]string, 0, len(patterns)) @@ -204,43 +209,60 @@ extractQueries: } } - modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return nil, err - } + // Only use go/packages' overlay processing if we're using a Go version + // below 1.16. Otherwise, go list handles it. + if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return nil, err + } - var containsCandidates []string - if len(containFiles) > 0 { - containsCandidates = append(containsCandidates, modifiedPkgs...) - containsCandidates = append(containsCandidates, needPkgs...) - } - if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { - return nil, err - } - // Check candidate packages for containFiles. 
- if len(containFiles) > 0 { - for _, id := range containsCandidates { - pkg, ok := response.seenPackages[id] - if !ok { - response.addPackage(&Package{ - ID: id, - Errors: []Error{ - { + var containsCandidates []string + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{{ Kind: ListError, Msg: fmt.Sprintf("package %s expected but not seen", id), - }, - }, - }) - continue - } - for _, f := range containFiles { - for _, g := range pkg.GoFiles { - if sameFile(f, g) { - response.addRoot(id) + }}, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } } } } } + // Add root for any package that matches a pattern. This applies only to + // packages that are modified by overlays, since they are not added as + // roots automatically. + for _, pattern := range restPatterns { + match := matchPattern(pattern) + for _, pkgID := range modifiedPkgs { + pkg, ok := response.seenPackages[pkgID] + if !ok { + continue + } + if match(pkg.PkgPath) { + response.addRoot(pkg.ID) + } + } + } } sizeswg.Wait() @@ -362,32 +384,34 @@ func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, // Fields must match go list; // see $GOROOT/src/cmd/go/internal/load/pkg.go. type jsonPackage struct { - ImportPath string - Dir string - Name string - Export string - GoFiles []string - CompiledGoFiles []string - CFiles []string - CgoFiles []string - CXXFiles []string - MFiles []string - HFiles []string - FFiles []string - SFiles []string - SwigFiles []string - SwigCXXFiles []string - SysoFiles []string - Imports []string - ImportMap map[string]string - Deps []string - Module *Module - TestGoFiles []string - TestImports []string - XTestGoFiles []string - XTestImports []string - ForTest string // q in a "p [q.test]" package, else "" - DepOnly bool + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool Error *jsonPackageError } @@ -539,11 +563,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), forTest: p.ForTest, Module: p.Module, } - if (state.cfg.Mode&TypecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { if len(p.CompiledGoFiles) > len(p.GoFiles) { // We need the cgo definitions, which are in the first // CompiledGoFile after the non-cgo ones. 
This is a hack but there @@ -635,6 +660,39 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkg.CompiledGoFiles = pkg.GoFiles } + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. + if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { + addFilenameFromPos := func(pos string) bool { + split := strings.Split(pos, ":") + if len(split) < 1 { + return false + } + filename := strings.TrimSpace(split[0]) + if filename == "" { + return false + } + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + info, _ := os.Stat(filename) + if info == nil { + return false + } + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + return true + } + found := addFilenameFromPos(err.Pos) + // In some cases, go list only reports the error position in the + // error text, not the error position. One such case is when the + // file's package name is a keyword (see golang.org/issue/39763). + if !found { + addFilenameFromPos(err.Err) + } + } + if p.Error != nil { msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. // Address golang.org/issue/35964 by appending import stack to error message. @@ -664,7 +722,38 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse return &response, nil } -// getPkgPath finds the package path of a directory if it's relative to a root directory. +func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { + if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { + return false + } + + goV, err := state.getGoVersion() + if err != nil { + return false + } + + // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. + // The import stack behaves differently for these versions than newer Go versions. + if goV < 15 { + return len(p.Error.ImportStack) == 0 + } + + // On Go 1.15 and later, only parse filenames out of error if there's no import stack, + // or the current package is at the top of the import stack. This is not guaranteed + // to work perfectly, but should avoid some cases where files in errors don't belong to this + // package. + return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath +} + +func (state *golistState) getGoVersion() (int, error) { + state.goVersionOnce.Do(func() { + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + }) + return state.goVersion, state.goVersionError +} + +// getPkgPath finds the package path of a directory if it's relative to a root +// directory. func (state *golistState) getPkgPath(dir string) (string, bool, error) { absDir, err := filepath.Abs(dir) if err != nil { @@ -731,18 +820,47 @@ func golistargs(cfg *Config, words []string) []string { return fullargs } -// invokeGo returns the stdout of a go command invocation. -func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { +// cfgInvocation returns an Invocation that reflects cfg's settings. 
+func (state *golistState) cfgInvocation() gocommand.Invocation { cfg := state.cfg - - inv := gocommand.Invocation{ - Verb: verb, - Args: args, + return gocommand.Invocation{ BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + + // For Go versions 1.16 and above, `go list` accepts overlays directly via + // the -overlay flag. Set it, if it's available. + // + // The check for "list" is not necessarily required, but we should avoid + // getting the go version if possible. + if verb == "list" { + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + if goVersion >= 16 { + filename, cleanup, err := state.writeOverlays() + if err != nil { + return nil, err + } + defer cleanup() + inv.Overlay = filename + } + } + inv.Verb = verb + inv.Args = args gocmdRunner := cfg.gocmdRunner if gocmdRunner == nil { gocmdRunner = &gocommand.Runner{} @@ -784,8 +902,13 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { - msg := stderr.String()[len("# "):] + msg := msg[len("# "):] if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { return stdout, nil } @@ -882,6 +1005,67 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return stdout, nil } +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. +// +// From golang/go#39958. +type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` +} + +// writeOverlays writes out files for go list's -overlay flag, as described +// above. +func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(state.cfg.Overlay) == 0 { + return "", func() {}, nil + } + dir, err := ioutil.TempDir("", "gopackages-*") + if err != nil { + return "", nil, err + } + // The caller must clean up this directory, unless this function returns an + // error. + cleanup = func() { + os.RemoveAll(dir) + } + defer func() { + if err != nil { + cleanup() + } + }() + overlays := map[string]string{} + for k, v := range state.cfg.Overlay { + // Create a unique filename for the overlaid files, to avoid + // creating nested directories. 
+ noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") + f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + if err != nil { + return "", func() {}, err + } + if _, err := f.Write(v); err != nil { + return "", func() {}, err + } + if err := f.Close(); err != nil { + return "", func() {}, err + } + overlays[k] = f.Name() + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", func() {}, err + } + // Write out the overlay file that contains the filepath mappings. + filename = filepath.Join(dir, "overlay.json") + if err := ioutil.WriteFile(filename, b, 0665); err != nil { + return "", func() {}, err + } + return filename, cleanup, nil +} + func containsGoFile(s []string) bool { for _, f := range s { if strings.HasSuffix(f, ".go") { @@ -891,17 +1075,22 @@ func containsGoFile(s []string) bool { return false } -func cmdDebugStr(cmd *exec.Cmd, args ...string) string { +func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { - split := strings.Split(kv, "=") + split := strings.SplitN(kv, "=", 2) k, v := split[0], split[1] env[k] = v } - var quotedArgs []string - for _, arg := range args { - quotedArgs = append(quotedArgs, strconv.Quote(arg)) - } - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 3c99b6e48d..de2c1dc579 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -1,3 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package packages import ( @@ -8,9 +12,12 @@ import ( "log" "os" "path/filepath" + "regexp" "sort" "strconv" "strings" + + "golang.org/x/tools/internal/gocommand" ) // processGolistOverlay provides rudimentary support for adding @@ -70,9 +77,9 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif // to the overlay. continue } - // if all the overlay files belong to a different package, change the package - // name to that package. Otherwise leave it alone; there will be an error message. - maybeFixPackageName(pkgName, pkgOfDir, dir) + // If all the overlay files belong to a different package, change the + // package name to that package. + maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) nextPackage: for _, p := range response.dr.Packages { if pkgName != p.Name && p.ID != "command-line-arguments" { @@ -89,9 +96,19 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif // because the file is generated in another directory. testVariantOf = p continue nextPackage + } else if !isTestFile && hasTestFiles(p) { + // We're examining a test variant, but the overlaid file is + // a non-test file. 
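For context on what the writeOverlays helper added above produces (an illustrative sketch, not part of the vendored diff; all paths are hypothetical): the overlay file handed to `go list -overlay` on Go 1.16+ is simply a JSON object mapping original source paths to the temporary replacement files.

    // Sketch only: the shape of the overlay file consumed by `go list -overlay`.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    type OverlayJSON struct {
        Replace map[string]string `json:"replace,omitempty"`
    }

    func main() {
        overlay := OverlayJSON{Replace: map[string]string{
            // overlaid source path -> temp file holding the edited contents (hypothetical paths)
            "/home/user/mod/pkg/a.go": "/tmp/gopackages-123456/001-a.go",
        }}
        b, err := json.Marshal(overlay)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))
        // {"replace":{"/home/user/mod/pkg/a.go":"/tmp/gopackages-123456/001-a.go"}}
    }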
Because the overlay implementation + // (currently) only adds a file to one package, skip this + // package, so that we can add the file to the production + // variant of the package. (https://golang.org/issue/36857 + // tracks handling overlays on both the production and test + // variant of a package). + continue nextPackage } - // We must have already seen the package of which this is a test variant. if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { + // We have already seen the production version of the + // for which p is a test variant. if hasTestFiles(p) { testVariantOf = pkg } @@ -102,8 +119,11 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif } } } - // The overlay could have included an entirely new package. - if pkg == nil { + // The overlay could have included an entirely new package or an + // ad-hoc package. An ad-hoc package is one that we have manually + // constructed from inadequate `go list` results for a file= query. + // It will have the ID command-line-arguments. + if pkg == nil || pkg.ID == "command-line-arguments" { // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. pkgPath, ok, err := state.getPkgPath(dir) @@ -113,34 +133,53 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif if !ok { break } + var forTest string // only set for x tests isXTest := strings.HasSuffix(pkgName, "_test") if isXTest { + forTest = pkgPath pkgPath += "_test" } id := pkgPath - if isTestFile && !isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - // Try to reclaim a package with the same id if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break + if isTestFile { + if isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) + } else { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) } } - // Otherwise, create a new package - if pkg == nil { - pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)} - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} + if pkg != nil { + // TODO(rstambler): We should change the package's path and ID + // here. The only issue is that this messes with the roots. + } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. 
+ pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest } } } @@ -158,6 +197,8 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif continue } for _, imp := range imports { + // TODO(rstambler): If the package is an x test and the import has + // a test variant, make sure to replace it. if _, found := pkg.Imports[imp]; found { continue } @@ -216,7 +257,7 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif return modifiedPkgs, needPkgs, err } -// resolveImport finds the the ID of a package given its import path. +// resolveImport finds the ID of a package given its import path. // In particular, it will find the right vendored copy when in GOPATH mode. func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { env, err := state.getEnv() @@ -291,24 +332,25 @@ func (state *golistState) determineRootDirs() (map[string]string, error) { } func (state *golistState) determineRootDirsModules() (map[string]string, error) { - // This will only return the root directory for the main module. - // For now we only support overlays in main modules. + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. // Editing files in the module cache isn't a great idea, so we don't - // plan to ever support that, but editing files in replaced modules - // is something we may want to support. To do that, we'll want to - // do a go list -m to determine the replaced module's module path and - // directory, and then a go list -m {{with .Replace}}{{.Dir}}{{end}} - // from the main module to determine if that module is actually a replacement. - // See bcmills's comment here: https://github.com/golang/go/issues/37629#issuecomment-594179751 - // for more information. - out, err := state.invokeGo("list", "-m", "-json") + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") if err != nil { - return nil, err + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. + var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } } - m := map[string]string{} - type jsonMod struct{ Path, Dir string } + roots := map[string]string{} + modules := map[string]string{} + var i int for dec := json.NewDecoder(out); dec.More(); { - mod := new(jsonMod) + mod := new(gocommand.ModuleJSON) if err := dec.Decode(mod); err != nil { return nil, err } @@ -318,10 +360,15 @@ func (state *golistState) determineRootDirsModules() (map[string]string, error) if err != nil { return nil, err } - m[absDir] = mod.Path + modules[absDir] = mod.Path + // The first result is the main module. + if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } } + i++ } - return m, nil + return roots, nil } func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { @@ -415,24 +462,111 @@ func commonDir(a []string) string { // package name, and they all have the same package name, then that name becomes // the package name. // It returns true if it changes the package name, false otherwise. 
-func maybeFixPackageName(newName string, pkgOfDir map[string][]*Package, dir string) bool { +func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { names := make(map[string]int) - for _, p := range pkgOfDir[dir] { + for _, p := range pkgsOfDir { names[p.Name]++ } if len(names) != 1 { // some files are in different packages - return false + return } - oldName := "" + var oldName string for k := range names { oldName = k } if newName == oldName { - return false + return } - for _, p := range pkgOfDir[dir] { + // We might have a case where all of the package names in the directory are + // the same, but the overlay file is for an x test, which belongs to its + // own package. If the x test does not yet exist on disk, we may not yet + // have its package name on disk, but we should not rename the packages. + // + // We use a heuristic to determine if this file belongs to an x test: + // The test file should have a package name whose package name has a _test + // suffix or looks like "newName_test". + maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") + if isTestFile && maybeXTest { + return + } + for _, p := range pkgsOfDir { p.Name = newName } - return true +} + +// This function is copy-pasted from +// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. +// It should be deleted when we remove support for overlays from go/packages. +// +// NOTE: This does not handle any ./... or ./ style queries, as this function +// doesn't know the working directory. +// +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +// Unfortunately, there are two special cases. Quoting "go help packages": +// +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separated pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +func matchPattern(pattern string) func(name string) bool { + // Convert pattern to regular expression. + // The strategy for the trailing /... is to nest it in an explicit ? expression. + // The strategy for the vendor exclusion is to change the unmatchable + // vendor strings to a disallowed code point (vendorChar) and to use + // "(anything but that codepoint)*" as the implementation of the ... wildcard. + // This is a bit complicated but the obvious alternative, + // namely a hand-written search like in most shell glob matchers, + // is too easy to make accidentally exponential. + // Using package regexp guarantees linear-time matching. 
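A usage sketch of matchPattern as documented above (illustrative only, not part of the diff; the expected results follow from the quoted "go help packages" rules):

    // Sketch (same package): how the limited glob semantics play out.
    func exampleMatchPattern() {
        match := matchPattern("golang.org/x/tools/...")
        fmt.Println(match("golang.org/x/tools"))             // true: a trailing /... may match the empty string
        fmt.Println(match("golang.org/x/tools/go/packages")) // true: subdirectory match
        fmt.Println(match("golang.org/x/tools/vendor/foo"))  // false: the wildcard never matches a "vendor" element
        fmt.Println(match("golang.org/x/text"))              // false: different path
    }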
+ + const vendorChar = "\x00" + + if strings.Contains(pattern, vendorChar) { + return func(name string) bool { return false } + } + + re := regexp.QuoteMeta(pattern) + re = replaceVendor(re, vendorChar) + switch { + case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): + re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` + case re == vendorChar+`/\.\.\.`: + re = `(/vendor|/` + vendorChar + `/\.\.\.)` + case strings.HasSuffix(re, `/\.\.\.`): + re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` + } + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) + + reg := regexp.MustCompile(`^` + re + `$`) + + return func(name string) bool { + if strings.Contains(name, vendorChar) { + return false + } + return reg.MatchString(replaceVendor(name, vendorChar)) + } +} + +// replaceVendor returns the result of replacing +// non-trailing vendor path elements in x with repl. +func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 1e6f9a4675..38475e8712 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -19,7 +19,6 @@ import ( "log" "os" "path/filepath" - "reflect" "strings" "sync" "time" @@ -27,6 +26,7 @@ import ( "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typesinternal" ) // A LoadMode controls the amount of detail to return when loading. @@ -73,9 +73,9 @@ const ( // NeedTypesSizes adds TypesSizes. NeedTypesSizes - // TypecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. - TypecheckCgo + typecheckCgo // NeedModule adds Module. NeedModule @@ -144,6 +144,12 @@ type Config struct { // the build system's query tool. BuildFlags []string + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string + // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -191,6 +197,13 @@ type driver func(cfg *Config, patterns ...string) (*driverResponse, error) // driverResponse contains the results for a driver query. type driverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the driverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + // Sizes, if not nil, is the types.Sizes to use when type checking. Sizes *types.StdSizes @@ -232,14 +245,22 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { return l.refine(response.Roots, response.Packages...) 
} -// defaultDriver is a driver that looks for an external driver binary, and if -// it does not find it falls back to the built in go list driver. +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { driver := findExternalDriver(cfg) if driver == nil { driver = goListDriver } - return driver(cfg, patterns...) + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil } // A Package describes a loaded Go package. @@ -274,6 +295,11 @@ type Package struct { // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. OtherFiles []string + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. + IgnoredFiles []string + // ExportFile is the absolute path to a file containing type // information for the package as provided by the build system. ExportFile string @@ -346,6 +372,13 @@ func init() { packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { config.(*Config).gocmdRunner = runner } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -388,6 +421,7 @@ type flatPackage struct { GoFiles []string `json:",omitempty"` CompiledGoFiles []string `json:",omitempty"` OtherFiles []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` ExportFile string `json:",omitempty"` Imports map[string]string `json:",omitempty"` } @@ -410,6 +444,7 @@ func (p *Package) MarshalJSON() ([]byte, error) { GoFiles: p.GoFiles, CompiledGoFiles: p.CompiledGoFiles, OtherFiles: p.OtherFiles, + IgnoredFiles: p.IgnoredFiles, ExportFile: p.ExportFile, } if len(p.Imports) > 0 { @@ -696,7 +731,8 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { result[i] = lpkg.Package } for i := range ld.pkgs { - // Clear all unrequested fields, for extra de-Hyrum-ization. + // Clear all unrequested fields, + // to catch programs that use more than they request. if ld.requestedMode&NeedName == 0 { ld.pkgs[i].Name = "" ld.pkgs[i].PkgPath = "" @@ -704,6 +740,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if ld.requestedMode&NeedFiles == 0 { ld.pkgs[i].GoFiles = nil ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil } if ld.requestedMode&NeedCompiledGoFiles == 0 { ld.pkgs[i].CompiledGoFiles = nil @@ -906,18 +943,14 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Error: appendError, Sizes: ld.sizes, } - if (ld.Mode & TypecheckCgo) != 0 { - // TODO: remove this when we stop supporting 1.14. 
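To show where the new IgnoredFiles field surfaces for users of the public go/packages API (an illustrative sketch, not part of the diff; the directory is hypothetical):

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        cfg := &packages.Config{
            Mode: packages.NeedName | packages.NeedFiles,
            Dir:  "/path/to/some/module", // hypothetical working directory
        }
        pkgs, err := packages.Load(cfg, "./...")
        if err != nil {
            log.Fatal(err)
        }
        for _, p := range pkgs {
            // IgnoredFiles lists sources excluded by the current build
            // configuration (other GOOS/GOARCH, build tags, and so on).
            fmt.Printf("%s: %d built, %d ignored\n", p.PkgPath, len(p.GoFiles), len(p.IgnoredFiles))
        }
    }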
- rtc := reflect.ValueOf(tc).Elem() - usesCgo := rtc.FieldByName("UsesCgo") - if !usesCgo.IsValid() { + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { appendError(Error{ - Msg: "TypecheckCgo requires Go 1.15+", + Msg: "typecheckCgo requires Go 1.15+", Kind: ListError, }) return } - usesCgo.SetBool(true) } types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index 26586810c7..01f6e829f7 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -14,6 +14,12 @@ import ( "strings" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/lsp/fuzzy" +) + +var ( + GetTypeErrors func(p interface{}) []types.Error + SetTypeErrors func(p interface{}, errors []types.Error) ) func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { @@ -45,32 +51,34 @@ func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.T default: panic("unknown basic type") } - case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice: + case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: return ast.NewIdent("nil") case *types.Struct: - texpr := typeExpr(fset, f, pkg, typ) // typ because we want the name here. + texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here. if texpr == nil { return nil } return &ast.CompositeLit{ Type: texpr, } - case *types.Array: - texpr := typeExpr(fset, f, pkg, u.Elem()) - if texpr == nil { - return nil - } - return &ast.CompositeLit{ - Type: &ast.ArrayType{ - Elt: texpr, - Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprintf("%v", u.Len())}, - }, - } } return nil } -func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { +// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of +// analysisinternal.ZeroValue) +func IsZeroValue(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { switch t := typ.(type) { case *types.Basic: switch t.Kind() { @@ -79,7 +87,96 @@ func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty default: return ast.NewIdent(t.Name()) } + case *types.Pointer: + x := TypeExpr(fset, f, pkg, t.Elem()) + if x == nil { + return nil + } + return &ast.UnaryExpr{ + Op: token.MUL, + X: x, + } + case *types.Array: + elt := TypeExpr(fset, f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: elt, + } + case *types.Slice: + elt := TypeExpr(fset, f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Elt: elt, + } + case *types.Map: + key := TypeExpr(fset, f, pkg, t.Key()) + value := TypeExpr(fset, f, pkg, t.Elem()) + if key == nil || value == nil { + return nil + } + return &ast.MapType{ + Key: key, + Value: value, + } + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + value := 
TypeExpr(fset, f, pkg, t.Elem()) + if value == nil { + return nil + } + return &ast.ChanType{ + Dir: dir, + Value: value, + } + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + p := TypeExpr(fset, f, pkg, t.Params().At(i).Type()) + if p == nil { + return nil + } + params = append(params, &ast.Field{ + Type: p, + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + r := TypeExpr(fset, f, pkg, t.Results().At(i).Type()) + if r == nil { + return nil + } + returns = append(returns, &ast.Field{ + Type: r, + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } case *types.Named: + if t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } if t.Obj().Pkg() == pkg { return ast.NewIdent(t.Obj().Name()) } @@ -101,14 +198,15 @@ func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty X: ast.NewIdent(pkgName), Sel: ast.NewIdent(t.Obj().Name()), } + case *types.Struct: + return ast.NewIdent(t.String()) + case *types.Interface: + return ast.NewIdent(t.String()) default: - return nil // TODO: anonymous structs, but who does that + return nil } } -var GetTypeErrors = func(p interface{}) []types.Error { return nil } -var SetTypeErrors = func(p interface{}, errors []types.Error) {} - type TypeErrorPass string const ( @@ -116,3 +214,212 @@ const ( NoResultValues TypeErrorPass = "noresultvalues" UndeclaredName TypeErrorPass = "undeclaredname" ) + +// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. +// Some examples: +// +// Basic Example: +// z := 1 +// y := z + x +// If x is undeclared, then this function would return `y := z + x`, so that we +// can insert `x := ` on the line before `y := z + x`. +// +// If stmt example: +// if z == 1 { +// } else if z == y {} +// If y is undeclared, then this function would return `if z == 1 {`, because we cannot +// insert a statement between an if and an else if statement. As a result, we need to find +// the top of the if chain to insert `y := ` before. +func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { + enclosingIndex := -1 + for i, p := range path { + if _, ok := p.(ast.Stmt); ok { + enclosingIndex = i + break + } + } + if enclosingIndex == -1 { + return nil + } + enclosingStmt := path[enclosingIndex] + switch enclosingStmt.(type) { + case *ast.IfStmt: + // The enclosingStmt is inside of the if declaration, + // We need to check if we are in an else-if stmt and + // get the base if statement. + return baseIfStmt(path, enclosingIndex) + case *ast.CaseClause: + // Get the enclosing switch stmt if the enclosingStmt is + // inside of the case statement. + for i := enclosingIndex + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.SwitchStmt); ok { + return node + } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { + return node + } + } + } + if len(path) <= enclosingIndex+1 { + return enclosingStmt.(ast.Stmt) + } + // Check if the enclosing statement is inside another node. + switch expr := path[enclosingIndex+1].(type) { + case *ast.IfStmt: + // Get the base if statement. + return baseIfStmt(path, enclosingIndex+1) + case *ast.ForStmt: + if expr.Init == enclosingStmt || expr.Post == enclosingStmt { + return expr + } + } + return enclosingStmt.(ast.Stmt) +} + +// baseIfStmt walks up the if/else-if chain until we get to +// the top of the current if chain. 
+func baseIfStmt(path []ast.Node, index int) ast.Stmt { + stmt := path[index] + for i := index + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { + stmt = node + continue + } + break + } + return stmt.(ast.Stmt) +} + +// WalkASTWithParent walks the AST rooted at n. The semantics are +// similar to ast.Inspect except it does not call f(nil). +func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { + var ancestors []ast.Node + ast.Inspect(n, func(n ast.Node) (recurse bool) { + if n == nil { + ancestors = ancestors[:len(ancestors)-1] + return false + } + + var parent ast.Node + if len(ancestors) > 0 { + parent = ancestors[len(ancestors)-1] + } + ancestors = append(ancestors, n) + return f(n, parent) + }) +} + +// FindMatchingIdents finds all identifiers in 'node' that match any of the given types. +// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within +// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that +// is unrecognized. +func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]*ast.Ident { + matches := map[types.Type][]*ast.Ident{} + // Initialize matches to contain the variable types we are searching for. + for _, typ := range typs { + if typ == nil { + continue + } + matches[typ] = []*ast.Ident{} + } + seen := map[types.Object]struct{}{} + ast.Inspect(node, func(n ast.Node) bool { + if n == nil { + return false + } + // Prevent circular definitions. If 'pos' is within an assignment statement, do not + // allow any identifiers in that assignment statement to be selected. Otherwise, + // we could do the following, where 'x' satisfies the type of 'f0': + // + // x := fakeStruct{f0: x} + // + assignment, ok := n.(*ast.AssignStmt) + if ok && pos > assignment.Pos() && pos <= assignment.End() { + return false + } + if n.End() > pos { + return n.Pos() <= pos + } + ident, ok := n.(*ast.Ident) + if !ok || ident.Name == "_" { + return true + } + obj := info.Defs[ident] + if obj == nil || obj.Type() == nil { + return true + } + if _, ok := obj.(*types.TypeName); ok { + return true + } + // Prevent duplicates in matches' values. + if _, ok = seen[obj]; ok { + return true + } + seen[obj] = struct{}{} + // Find the scope for the given position. Then, check whether the object + // exists within the scope. + innerScope := pkg.Scope().Innermost(pos) + if innerScope == nil { + return true + } + _, foundObj := innerScope.LookupParent(ident.Name, pos) + if foundObj != obj { + return true + } + // The object must match one of the types that we are searching for. + if idents, ok := matches[obj.Type()]; ok { + matches[obj.Type()] = append(idents, ast.NewIdent(ident.Name)) + } + // If the object type does not exactly match any of the target types, greedily + // find the first target type that the object type can satisfy. + for typ := range matches { + if obj.Type() == typ { + continue + } + if equivalentTypes(obj.Type(), typ) { + matches[typ] = append(matches[typ], ast.NewIdent(ident.Name)) + } + } + return true + }) + return matches +} + +func equivalentTypes(want, got types.Type) bool { + if want == got || types.Identical(want, got) { + return true + } + // Code segment to help check for untyped equality from (golang/go#32146). 
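The WalkASTWithParent helper above layers an explicit ancestor stack on ast.Inspect; the same pattern using only the standard library looks roughly like this (illustrative sketch, not part of the diff):

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "log"
    )

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "x.go", "package x\nfunc f() { _ = 1 + 2 }", 0)
        if err != nil {
            log.Fatal(err)
        }
        var ancestors []ast.Node
        ast.Inspect(f, func(n ast.Node) bool {
            if n == nil { // a subtree is finished: pop its root
                ancestors = ancestors[:len(ancestors)-1]
                return false
            }
            var parent ast.Node
            if len(ancestors) > 0 {
                parent = ancestors[len(ancestors)-1]
            }
            if _, ok := n.(*ast.BinaryExpr); ok {
                fmt.Printf("BinaryExpr inside %T\n", parent) // BinaryExpr inside *ast.AssignStmt
            }
            ancestors = append(ancestors, n)
            return true
        })
    }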
+ if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { + if lhs, ok := got.Underlying().(*types.Basic); ok { + return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType + } + } + return types.AssignableTo(want, got) +} + +// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the +// given pattern. We return the identifier whose name is most similar to the pattern. +func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr { + fuzz := fuzzy.NewMatcher(pattern) + var bestFuzz ast.Expr + highScore := float32(0) // minimum score is 0 (no match) + for _, ident := range idents { + // TODO: Improve scoring algorithm. + score := fuzz.Score(ident.Name) + if score > highScore { + highScore = score + bestFuzz = ident + } else if score == 0 { + // Order matters in the fuzzy matching algorithm. If we find no match + // when matching the target to the identifier, try matching the identifier + // to the target. + revFuzz := fuzzy.NewMatcher(ident.Name) + revScore := revFuzz.Score(pattern) + if revScore > highScore { + highScore = revScore + bestFuzz = ident + } + } + } + return bestFuzz +} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go index e37b494915..a6cf0e64a4 100644 --- a/vendor/golang.org/x/tools/internal/event/core/event.go +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -12,7 +12,7 @@ import ( "golang.org/x/tools/internal/event/label" ) -// Event holds the information about an event of note that ocurred. +// Event holds the information about an event of note that occurred. type Event struct { at time.Time diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 9aa7984561..f65aad4ec9 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -13,6 +13,7 @@ import ( "os" "os/exec" "regexp" + "strconv" "strings" "sync" "time" @@ -23,57 +24,106 @@ import ( // An Runner will run go command invocations and serialize // them if it sees a concurrency error. type Runner struct { - // LoadMu guards packages.Load calls and associated state. - loadMu sync.Mutex - serializeLoads int + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. + serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) } // 1.13: go: updates to go.mod needed, but contents have changed // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) -// Run calls Runner.RunRaw, serializing requests if they fight over -// go.mod changes. +// Run is a convenience wrapper around RunRaw. +// It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } -// RunRaw calls Invocation.runRaw, serializing requests if they fight over +// RunPiped runs the invocation serially, always waiting for any concurrent +// invocations to complete first. 
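FindBestMatch above scores each candidate against the pattern and, when that fails, retries with the roles swapped, keeping the highest score. The fuzzy.Matcher it relies on lives in the internal lsp/fuzzy package vendored later in this patch and is not importable outside x/tools, so the sketch below substitutes a naive case-insensitive subsequence scorer purely to illustrate the same try-both-directions selection:

package main

import (
	"fmt"
	"strings"
)

// subseqScore is a naive stand-in for fuzzy.Matcher.Score: 1 if pattern is a
// case-insensitive subsequence of candidate, 0 otherwise.
func subseqScore(pattern, candidate string) float32 {
	p, c := strings.ToLower(pattern), strings.ToLower(candidate)
	i := 0
	for j := 0; j < len(c) && i < len(p); j++ {
		if p[i] == c[j] {
			i++
		}
	}
	if i == len(p) {
		return 1
	}
	return 0
}

// bestMatch mirrors the selection logic above: score candidates against the
// pattern and, when a candidate scores zero, retry with the roles reversed.
func bestMatch(pattern string, candidates []string) string {
	var best string
	var high float32
	for _, cand := range candidates {
		score := subseqScore(pattern, cand)
		if score > high {
			high, best = score, cand
		} else if score == 0 {
			if rev := subseqScore(cand, pattern); rev > high {
				high, best = rev, cand
			}
		}
	}
	return best
}

func main() {
	fmt.Println(bestMatch("respWriter", []string{"req", "w", "responseWriter"}))
}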
+func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { - // We want to run invocations concurrently as much as possible. However, - // if go.mod updates are needed, only one can make them and the others will - // fail. We need to retry in those cases, but we don't want to thrash so - // badly we never recover. To avoid that, once we've seen one concurrency - // error, start serializing everything until the backlog has cleared out. - runner.loadMu.Lock() - var locked bool // If true, we hold the mutex and have incremented. - if runner.serializeLoads == 0 { - runner.loadMu.Unlock() - } else { - locked = true - runner.serializeLoads++ - } - defer func() { - if locked { - runner.serializeLoads-- - runner.loadMu.Unlock() - } - }() + // Make sure the runner is always initialized. + runner.initialize() - for { - stdout, stderr, friendlyErr, err := inv.runRaw(ctx) - if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err - } - event.Error(ctx, "Load concurrency error, will retry serially", err) - if !locked { - runner.loadMu.Lock() - runner.serializeLoads++ - locked = true + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err + } + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() } } + + return inv.runWithFriendlyError(ctx, stdout, stderr) } // An Invocation represents a call to the go command. 
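The reworked Runner above replaces the mutex-and-counter scheme with two channels: a buffered channel that caps concurrent go command invocations, and a one-slot channel that lets a piped run drain every worker and execute alone. A minimal standalone sketch of that pattern; the limiter type and function names are illustrative, not the vendored API:

package main

import (
	"context"
	"fmt"
	"sync"
)

const maxWorkers = 10

// limiter mirrors the Runner above: a buffered channel caps concurrent calls,
// and a one-slot channel serializes callers that must run with no concurrency.
type limiter struct {
	once       sync.Once
	inFlight   chan struct{}
	serialized chan struct{}
}

func (l *limiter) init() {
	l.once.Do(func() {
		l.inFlight = make(chan struct{}, maxWorkers)
		l.serialized = make(chan struct{}, 1)
	})
}

func (l *limiter) runConcurrent(ctx context.Context, f func()) error {
	l.init()
	// Claim one worker slot, or give up if the context is cancelled.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case l.inFlight <- struct{}{}:
		defer func() { <-l.inFlight }()
	}
	f()
	return nil
}

func (l *limiter) runSerial(ctx context.Context, f func()) error {
	l.init()
	// Take the serialization slot first so two serial callers cannot deadlock
	// while each holds part of the worker pool.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case l.serialized <- struct{}{}:
		defer func() { <-l.serialized }()
	}
	// Then claim every worker slot, which waits for in-flight calls to finish.
	for i := 0; i < maxWorkers; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case l.inFlight <- struct{}{}:
			defer func() { <-l.inFlight }()
		}
	}
	f()
	return nil
}

func main() {
	var l limiter
	_ = l.runConcurrent(context.Background(), func() { fmt.Println("concurrent call") })
	_ = l.runSerial(context.Background(), func() { fmt.Println("serialized call") })
}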
@@ -81,17 +131,19 @@ type Invocation struct { Verb string Args []string BuildFlags []string + ModFlag string + ModFile string + Overlay string + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool Env []string WorkingDir string Logf func(format string, args ...interface{}) } -// RunRaw is like RunPiped, but also returns the raw stderr and error for callers -// that want to do low-level error handling/recovery. -func (i *Invocation) runRaw(ctx context.Context) (stdout *bytes.Buffer, stderr *bytes.Buffer, friendlyError error, rawError error) { - stdout = &bytes.Buffer{} - stderr = &bytes.Buffer{} - rawError = i.RunPiped(ctx, stdout, stderr) +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) if rawError != nil { friendlyError = rawError // Check for 'go' executable not being found. @@ -106,25 +158,48 @@ func (i *Invocation) runRaw(ctx context.Context) (stdout *bytes.Buffer, stderr * return } -// RunPiped is like Run, but relies on the given stdout/stderr -func (i *Invocation) RunPiped(ctx context.Context, stdout, stderr io.Writer) error { +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { log := i.Logf if log == nil { log = func(string, ...interface{}) {} } goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + appendOverlayFlag := func() { + if i.Overlay != "" { + goArgs = append(goArgs, "-overlay="+i.Overlay) + } + } + switch i.Verb { - case "mod": - // mod needs the sub-verb before build flags. - goArgs = append(goArgs, i.Args[0]) - goArgs = append(goArgs, i.BuildFlags...) - goArgs = append(goArgs, i.Args[1:]...) - case "env": - // env doesn't take build flags. + case "env", "version": goArgs = append(goArgs, i.Args...) - default: + case "mod": + // mod needs the sub-verb before flags. + goArgs = append(goArgs, i.Args[0]) + appendModFile() + goArgs = append(goArgs, i.Args[1:]...) + case "get": goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + goArgs = append(goArgs, i.Args...) + + default: // notably list and build. + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + appendModFlag() + appendOverlayFlag() goArgs = append(goArgs, i.Args...) } cmd := exec.Command("go", goArgs...) @@ -136,12 +211,14 @@ func (i *Invocation) RunPiped(ctx context.Context, stdout, stderr io.Writer) err // The Go stdlib has a special feature where if the cwd and the PWD are the // same node then it trusts the PWD, so by setting it in the env for the child // process we fix up all the paths returned by the go command. - cmd.Env = append(os.Environ(), i.Env...) + if !i.CleanEnv { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, i.Env...) 
if i.WorkingDir != "" { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) @@ -178,10 +255,19 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { - split := strings.Split(kv, "=") + split := strings.SplitN(kv, "=", 2) k, v := split[0], split[1] env[k] = v } - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args) + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 0000000000..1cd8d8473e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Replace *ModuleJSON // replaced by this module + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file for this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return nil, false, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + if modFlag != "" { + // Don't override an explicit '-mod=' argument. + return mainMod, modFlag == "vendor", nil + } + if mainMod == nil || !go114 { + return mainMod, false, nil + } + // Check 1.14's automatic vendor mode. 
+ if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return mainMod, true, nil + } + } + return mainMod, false, nil +} + +// getMainModuleAnd114 gets the main module's information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 0000000000..60d45ac0e6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,40 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "strings" +) + +// GoVersion checks the go version by running "go list" with modules off. +// It returns the X in Go 1.X. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`} + inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") + // Unset any unneeded flags. + inv.ModFile = "" + inv.ModFlag = "" + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 390cb9db79..925ff53560 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -10,7 +10,6 @@ import ( "bufio" "bytes" "fmt" - "go/build" "io/ioutil" "log" "os" @@ -47,16 +46,6 @@ type Root struct { Type RootType } -// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible. -func SrcDirsRoots(ctx *build.Context) []Root { - var roots []Root - roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT}) - for _, p := range filepath.SplitList(ctx.GOPATH) { - roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH}) - } - return roots -} - // Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. 
// For each package found, add will be called (concurrently) with the absolute // paths of the containing source directory and the package directory. diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 3a53bb6bf7..d859617b77 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -7,6 +7,7 @@ package imports import ( "bytes" "context" + "encoding/json" "fmt" "go/ast" "go/build" @@ -31,25 +32,25 @@ import ( // importToGroup is a list of functions which map from an import path to // a group number. -var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool){ - func(env *ProcessEnv, importPath string) (num int, ok bool) { - if env.LocalPrefix == "" { +var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { return } - for _, p := range strings.Split(env.LocalPrefix, ",") { + for _, p := range strings.Split(localPrefix, ",") { if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { return 3, true } } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(_, importPath string) (num int, ok bool) { if strings.HasPrefix(importPath, "appengine") { return 2, true } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(_, importPath string) (num int, ok bool) { firstComponent := strings.Split(importPath, "/")[0] if strings.Contains(firstComponent, ".") { return 1, true @@ -58,9 +59,9 @@ var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool }, } -func importGroup(env *ProcessEnv, importPath string) int { +func importGroup(localPrefix, importPath string) int { for _, fn := range importToGroup { - if n, ok := fn(env, importPath); ok { + if n, ok := fn(localPrefix, importPath); ok { return n } } @@ -82,7 +83,7 @@ type ImportFix struct { IdentName string // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). FixType ImportFixType - Relevance int // see pkg + Relevance float64 // see pkg } // An ImportInfo represents a single import statement. @@ -277,7 +278,12 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { unknown = append(unknown, imp.ImportPath) } - names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir) + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) if err != nil { return err } @@ -567,7 +573,9 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } - addStdlibCandidates(p, p.missingRefs) + if err := addStdlibCandidates(p, p.missingRefs); err != nil { + return nil, err + } p.assumeSiblingImportsValid() if fixes, done := p.fix(); done { return fixes, nil @@ -584,9 +592,9 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } -// Highest relevance, used for the standard library. Chosen arbitrarily to -// match pre-existing gopls code. -const MaxRelevance = 7 +// MaxRelevance is the highest relevance, used for the standard library. +// Chosen arbitrarily to match pre-existing gopls code. +const MaxRelevance = 7.0 // getCandidatePkgs works with the passed callback to find all acceptable packages. 
// It deduplicates by import path, and uses a cached stdlib rather than reading @@ -595,22 +603,28 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena notSelf := func(p *pkg) bool { return p.packageName != filePkg || p.dir != filepath.Dir(filename) } + goenv, err := env.goEnv() + if err != nil { + return err + } + + var mu sync.Mutex // to guard asynchronous access to dupCheck + dupCheck := map[string]struct{}{} + // Start off with the standard library. for importPath, exports := range stdlib { p := &pkg{ - dir: filepath.Join(env.GOROOT, "src", importPath), + dir: filepath.Join(goenv["GOROOT"], "src", importPath), importPathShort: importPath, packageName: path.Base(importPath), relevance: MaxRelevance, } - if notSelf(p) && wrappedCallback.packageNameLoaded(p) { + dupCheck[importPath] = struct{}{} + if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { wrappedCallback.exportsLoaded(p, exports) } } - var mu sync.Mutex - dupCheck := map[string]struct{}{} - scanFilter := &scanCallback{ rootFound: func(root gopathwalk.Root) bool { // Exclude goroot results -- getting them is relatively expensive, not cached, @@ -639,15 +653,23 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena wrappedCallback.exportsLoaded(pkg, exports) }, } - return env.GetResolver().scan(ctx, scanFilter) + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) } -func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int { - result := make(map[string]int) - for _, path := range paths { - result[path] = env.GetResolver().scoreImportPath(ctx, path) +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]float64, error) { + result := make(map[string]float64) + resolver, err := env.GetResolver() + if err != nil { + return nil, err } - return result + for _, path := range paths { + result[path] = resolver.scoreImportPath(ctx, path) + } + return result, nil } func PrimeCache(ctx context.Context, env *ProcessEnv) error { @@ -673,8 +695,9 @@ func candidateImportName(pkg *pkg) string { return "" } -// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. -func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { +// GetAllCandidates calls wrapped for each package whose name starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { return true @@ -707,13 +730,43 @@ func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix return getCandidatePkgs(ctx, callback, filename, filePkg, env) } +// GetImportPaths calls wrapped for each package whose import path starts with +// searchPrefix, and can be imported from filename with the package name filePkg. 
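importToGroup and importGroup now take a plain localPrefix string instead of a *ProcessEnv. A small standalone approximation of those grouping rules, showing how stdlib, domain-style, appengine, and locally-prefixed paths are bucketed; the sample module paths are arbitrary:

package main

import (
	"fmt"
	"strings"
)

// group approximates importGroup above: 3 for paths under the local prefix,
// 2 for appengine, 1 for domain-style third-party paths, 0 for the stdlib.
func group(localPrefix, importPath string) int {
	for _, p := range strings.Split(localPrefix, ",") {
		if p == "" {
			continue
		}
		if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath {
			return 3
		}
	}
	if strings.HasPrefix(importPath, "appengine") {
		return 2
	}
	if strings.Contains(strings.Split(importPath, "/")[0], ".") {
		return 1
	}
	return 0
}

func main() {
	const localPrefix = "example.com/demo"
	for _, p := range []string{"fmt", "appengine/datastore", "github.com/hashicorp/go-multierror", "example.com/demo/internal/util"} {
		fmt.Printf("%-34s group %d\n", p, group(localPrefix, p))
	}
}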
+func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + return strings.HasPrefix(pkg.importPathShort, searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + // A PackageExport is a package and its exports. type PackageExport struct { Fix *ImportFix Exports []string } -func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { +// GetPackageExports returns all known packages with name pkg and their exports. +func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { return true @@ -743,85 +796,154 @@ func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } +var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} + // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. type ProcessEnv struct { - LocalPrefix string - GocmdRunner *gocommand.Runner BuildFlags []string + ModFlag string + ModFile string - // If non-empty, these will be used instead of the - // process-wide values. - GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string - WorkingDir string + // Env overrides the OS environment, and can be used to specify + // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because + // exec.Command will not honor it. + // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + Env map[string]string + + WorkingDir string // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) + initialized bool + resolver Resolver } +func (e *ProcessEnv) goEnv() (map[string]string, error) { + if err := e.init(); err != nil { + return nil, err + } + return e.Env, nil +} + +func (e *ProcessEnv) matchFile(dir, name string) (bool, error) { + bctx, err := e.buildContext() + if err != nil { + return false, err + } + return bctx.MatchFile(dir, name) +} + // CopyConfig copies the env's configuration into a new env. 
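ProcessEnv now lazily fills its Env map by running `go env -json` for RequiredGoEnvVars instead of carrying individual GOPATH/GOROOT/... fields. A standalone sketch of that query using only the standard library; it assumes a go binary on PATH and uses a shortened variable list:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// A shortened stand-in for RequiredGoEnvVars.
	vars := []string{"GO111MODULE", "GOFLAGS", "GOMOD", "GOMODCACHE", "GOPATH", "GOROOT"}
	out, err := exec.Command("go", append([]string{"env", "-json"}, vars...)...).Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "go env failed:", err)
		return
	}
	env := map[string]string{}
	if err := json.Unmarshal(out, &env); err != nil {
		fmt.Fprintln(os.Stderr, "unexpected go env output:", err)
		return
	}
	fmt.Println("GOMOD:", env["GOMOD"])
	fmt.Println("GOMODCACHE:", env["GOMODCACHE"])
}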
func (e *ProcessEnv) CopyConfig() *ProcessEnv { - copy := *e - copy.resolver = nil - return © + copy := &ProcessEnv{ + GocmdRunner: e.GocmdRunner, + initialized: e.initialized, + BuildFlags: e.BuildFlags, + Logf: e.Logf, + WorkingDir: e.WorkingDir, + resolver: nil, + Env: map[string]string{}, + } + for k, v := range e.Env { + copy.Env[k] = v + } + return copy +} + +func (e *ProcessEnv) init() error { + if e.initialized { + return nil + } + + foundAllRequired := true + for _, k := range RequiredGoEnvVars { + if _, ok := e.Env[k]; !ok { + foundAllRequired = false + break + } + } + if foundAllRequired { + e.initialized = true + return nil + } + + if e.Env == nil { + e.Env = map[string]string{} + } + + goEnv := map[string]string{} + stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...) + if err != nil { + return err + } + if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { + return err + } + for k, v := range goEnv { + e.Env[k] = v + } + e.initialized = true + return nil } func (e *ProcessEnv) env() []string { - env := os.Environ() - add := func(k, v string) { - if v != "" { - env = append(env, k+"="+v) - } - } - add("GOPATH", e.GOPATH) - add("GOROOT", e.GOROOT) - add("GO111MODULE", e.GO111MODULE) - add("GOPROXY", e.GOPROXY) - add("GOFLAGS", e.GOFLAGS) - add("GOSUMDB", e.GOSUMDB) - if e.WorkingDir != "" { - add("PWD", e.WorkingDir) + var env []string // the gocommand package will prepend os.Environ. + for k, v := range e.Env { + env = append(env, k+"="+v) } return env } -func (e *ProcessEnv) GetResolver() Resolver { +func (e *ProcessEnv) GetResolver() (Resolver, error) { if e.resolver != nil { - return e.resolver + return e.resolver, nil } - out, err := e.invokeGo(context.TODO(), "env", "GOMOD") - if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 { + if err := e.init(); err != nil { + return nil, err + } + if len(e.Env["GOMOD"]) == 0 { e.resolver = newGopathResolver(e) - return e.resolver + return e.resolver, nil } e.resolver = newModuleResolver(e) - return e.resolver + return e.resolver, nil } -func (e *ProcessEnv) buildContext() *build.Context { +func (e *ProcessEnv) buildContext() (*build.Context, error) { ctx := build.Default - ctx.GOROOT = e.GOROOT - ctx.GOPATH = e.GOPATH + goenv, err := e.goEnv() + if err != nil { + return nil, err + } + ctx.GOROOT = goenv["GOROOT"] + ctx.GOPATH = goenv["GOPATH"] // As of Go 1.14, build.Context has a Dir field // (see golang.org/issue/34860). // Populate it only if present. rc := reflect.ValueOf(&ctx).Elem() dir := rc.FieldByName("Dir") - if !dir.IsValid() { - // Working drafts of Go 1.14 named the field "WorkingDir" instead. - // TODO(bcmills): Remove this case after the Go 1.14 beta has been released. - dir = rc.FieldByName("WorkingDir") - } if dir.IsValid() && dir.Kind() == reflect.String { dir.SetString(e.WorkingDir) } - return &ctx + // Since Go 1.11, go/build.Context.Import may invoke 'go list' depending on + // the value in GO111MODULE in the process's environment. We always want to + // run in GOPATH mode when calling Import, so we need to prevent this from + // happening. In Go 1.16, GO111MODULE defaults to "on", so this problem comes + // up more frequently. + // + // HACK: setting any of the Context I/O hooks prevents Import from invoking + // 'go list', regardless of GO111MODULE. This is undocumented, but it's + // unlikely to change before GOPATH support is removed. 
+ ctx.ReadDir = ioutil.ReadDir + + return &ctx, nil } func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) { @@ -836,10 +958,14 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) return e.GocmdRunner.Run(ctx, inv) } -func addStdlibCandidates(pass *pass, refs references) { +func addStdlibCandidates(pass *pass, refs references) error { + goenv, err := pass.env.goEnv() + if err != nil { + return err + } add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { return } exports := copyExports(stdlib[pkg]) @@ -860,6 +986,7 @@ func addStdlibCandidates(pass *pass, refs references) { } } } + return nil } // A Resolver does the build-system-specific parts of goimports. @@ -872,7 +999,7 @@ type Resolver interface { // loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) // scoreImportPath returns the relevance for an import path. - scoreImportPath(ctx context.Context, path string) int + scoreImportPath(ctx context.Context, path string) float64 ClearForNewScan() } @@ -924,10 +1051,13 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { return false // We'll do our own loading after we sort. }, } - err := pass.env.GetResolver().scan(context.Background(), callback) + resolver, err := pass.env.GetResolver() if err != nil { return err } + if err = resolver.scan(context.Background(), callback); err != nil { + return err + } // Search for imports matching potential package references. type result struct { @@ -1053,21 +1183,24 @@ func (r *gopathResolver) ClearForNewScan() { func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { names := map[string]string{} + bctx, err := r.env.buildContext() + if err != nil { + return nil, err + } for _, path := range importPaths { - names[path] = importPathToName(r.env, path, srcDir) + names[path] = importPathToName(bctx, path, srcDir) } return names, nil } // importPathToName finds out the actual package name, as declared in its .go files. -// If there's a problem, it returns "". -func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) { +func importPathToName(bctx *build.Context, importPath, srcDir string) string { // Fast path for standard library without going to disk. if _, ok := stdlib[importPath]; ok { return path.Base(importPath) // stdlib packages always match their paths. } - buildPkg, err := env.buildContext().Import(importPath, srcDir, build.FindOnly) + buildPkg, err := bctx.Import(importPath, srcDir, build.FindOnly) if err != nil { return "" } @@ -1131,10 +1264,10 @@ func packageDirToName(dir string) (packageName string, err error) { } type pkg struct { - dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") - importPathShort string // vendorless import path ("net/http", "a/b") - packageName string // package name loaded from source if requested - relevance int // a weakly-defined score of how relevant a package is. 0 is most relevant. 
+ dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") + packageName string // package name loaded from source if requested + relevance float64 // a weakly-defined score of how relevant a package is. 0 is most relevant. } type pkgDistance struct { @@ -1228,8 +1361,18 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error } stop := r.cache.ScanAndListen(ctx, processDir) defer stop() + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + var roots []gopathwalk.Root + roots = append(roots, gopathwalk.Root{filepath.Join(goenv["GOROOT"], "src"), gopathwalk.RootGOROOT}) + for _, p := range filepath.SplitList(goenv["GOPATH"]) { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "src"), gopathwalk.RootGOPATH}) + } // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. - roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound) + roots = filterRoots(roots, callback.rootFound) // We can't cancel walks, because we need them to finish to have a usable // cache. Instead, run them in a separate goroutine and detach. scanDone := make(chan struct{}) @@ -1250,7 +1393,7 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error return nil } -func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int { +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { if _, ok := stdlib[path]; ok { return MaxRelevance } @@ -1289,8 +1432,6 @@ func VendorlessPath(ipath string) string { } func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { - var exports []string - // Look for non-test, buildable .go files which could provide exports. all, err := ioutil.ReadDir(dir) if err != nil { @@ -1302,7 +1443,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { continue } - match, err := env.buildContext().MatchFile(dir, fi.Name()) + match, err := env.matchFile(dir, fi.Name()) if err != nil || !match { continue } @@ -1314,6 +1455,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } var pkgName string + var exports []string fset := token.NewFileSet() for _, fi := range files { select { @@ -1325,7 +1467,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl fullFile := filepath.Join(dir, fi.Name()) f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { - return "", nil, fmt.Errorf("parsing %s: %v", fullFile, err) + if env.Logf != nil { + env.Logf("error parsing %v: %v", fullFile, err) + } + continue } if f.Name.Name == "documentation" { // Special case from go/build.ImportDir, not @@ -1365,6 +1510,10 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } + resolver, err := pass.env.GetResolver() + if err != nil { + return nil, err + } // Collect exports for packages with matching names. rescv := make([]chan *pkg, len(candidates)) @@ -1403,7 +1552,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa } // If we're an x_test, load the package under test's test variant. 
includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir - _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest) + _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) if err != nil { if pass.env.Logf != nil { pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index f43d6b22e5..2815edc33d 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,29 +11,29 @@ package imports import ( "bufio" "bytes" - "context" "fmt" "go/ast" - "go/build" "go/format" "go/parser" "go/printer" "go/token" "io" - "io/ioutil" - "os" "regexp" "strconv" "strings" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/internal/gocommand" ) // Options is golang.org/x/tools/imports.Options with extra internal-only options. type Options struct { Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. + LocalPrefix string + Fragment bool // Accept fragment of a source file (no package statement) AllErrors bool // Report all errors (not just the first 10 on different lines) @@ -44,13 +44,8 @@ type Options struct { FormatOnly bool // Disable the insertion and deletion of imports } -// Process implements golang.org/x/tools/imports.Process with explicit context in env. +// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - fileSet := token.NewFileSet() file, adjust, err := parse(fileSet, filename, src, opt) if err != nil { @@ -66,16 +61,12 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e } // FixImports returns a list of fixes to the imports that, when applied, -// will leave the imports in the same state as Process. +// will leave the imports in the same state as Process. src and opt must +// be specified. // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - fileSet := token.NewFileSet() file, _, err := parse(fileSet, filename, src, opt) if err != nil { @@ -86,13 +77,9 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, } // ApplyFixes applies all of the fixes to the file and formats it. extraMode -// is added in when parsing the file. +// is added in when parsing the file. src and opts must be specified, but no +// env is needed. func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. 
fileSet := token.NewFileSet() @@ -116,63 +103,9 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e return formatFile(fileSet, file, src, nil, opt) } -// GetAllCandidates gets all of the packages starting with prefix that can be -// imported by filename, sorted by import path. -func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error { - _, opt, err := initialize(filename, []byte{}, opt) - if err != nil { - return err - } - return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env) -} - -// GetPackageExports returns all known packages with name pkg and their exports. -func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error { - _, opt, err := initialize(filename, []byte{}, opt) - if err != nil { - return err - } - return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env) -} - -// initialize sets the values for opt and src. -// If they are provided, they are not changed. Otherwise opt is set to the -// default values and src is read from the file system. -func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) { - // Use defaults if opt is nil. - if opt == nil { - opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} - } - - // Set the env if the user has not provided it. - if opt.Env == nil { - opt.Env = &ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - GOFLAGS: os.Getenv("GOFLAGS"), - GO111MODULE: os.Getenv("GO111MODULE"), - GOPROXY: os.Getenv("GOPROXY"), - GOSUMDB: os.Getenv("GOSUMDB"), - } - } - // Set the gocmdRunner if the user has not provided it. - if opt.Env.GocmdRunner == nil { - opt.Env.GocmdRunner = &gocommand.Runner{} - } - if src == nil { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, nil, err - } - src = b - } - - return src, opt, nil -} - func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { - mergeImports(opt.Env, fileSet, file) - sortImports(opt.Env, fileSet, file) + mergeImports(fileSet, file) + sortImports(opt.LocalPrefix, fileSet, file) imps := astutil.Imports(fileSet, file) var spacesBefore []string // import paths we need spaces before for _, impSection := range imps { @@ -183,7 +116,7 @@ func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func( lastGroup := -1 for _, importSpec := range impSection { importPath, _ := strconv.Unquote(importSpec.Path.Value) - groupNum := importGroup(opt.Env, importPath) + groupNum := importGroup(opt.LocalPrefix, importPath) if groupNum != lastGroup && lastGroup != -1 { spacesBefore = append(spacesBefore, importPath) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 69e3eecc4c..ce3269a430 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -15,7 +15,7 @@ import ( "strings" "golang.org/x/mod/module" - "golang.org/x/mod/semver" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -24,31 +24,21 @@ import ( type ModuleResolver struct { env *ProcessEnv moduleCacheDir string - dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. 
+ dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. roots []gopathwalk.Root scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. scannedRoots map[gopathwalk.Root]bool initialized bool - main *ModuleJSON - modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*ModuleJSON // ...or Dir. + main *gocommand.ModuleJSON + modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*gocommand.ModuleJSON // ...or Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache otherCache *dirInfoCache } -type ModuleJSON struct { - Path string // module path - Replace *ModuleJSON // replaced by this module - Main bool // is this the main module? - Indirect bool // is this module only an indirect dependency of main module? - Dir string // directory holding files for this module, if any - GoMod string // path to go.mod file for this module, if any - GoVersion string // go version used in module -} - func newModuleResolver(e *ProcessEnv) *ModuleResolver { r := &ModuleResolver{ env: e, @@ -62,7 +52,20 @@ func (r *ModuleResolver) init() error { if r.initialized { return nil } - mainMod, vendorEnabled, err := vendorEnabled(r.env) + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + inv := gocommand.Invocation{ + BuildFlags: r.env.BuildFlags, + ModFlag: r.env.ModFlag, + ModFile: r.env.ModFile, + Env: r.env.env(), + Logf: r.env.Logf, + WorkingDir: r.env.WorkingDir, + } + mainMod, vendorEnabled, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) if err != nil { return err } @@ -71,18 +74,26 @@ func (r *ModuleResolver) init() error { // Vendor mode is on, so all the non-Main modules are irrelevant, // and we need to search /vendor for everything. r.main = mainMod - r.dummyVendorMod = &ModuleJSON{ + r.dummyVendorMod = &gocommand.ModuleJSON{ Path: "", Dir: filepath.Join(mainMod.Dir, "vendor"), } - r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} - r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByModPath = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. r.initAllMods() } - r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") + if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { + r.moduleCacheDir = gmc + } else { + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return fmt.Errorf("empty GOPATH") + } + r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod") + } sort.Slice(r.modsByModPath, func(i, j int) bool { count := func(x int) int { @@ -98,7 +109,7 @@ func (r *ModuleResolver) init() error { }) r.roots = []gopathwalk.Root{ - {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + {filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT}, } if r.main != nil { r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) @@ -106,7 +117,7 @@ func (r *ModuleResolver) init() error { if vendorEnabled { r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) } else { - addDep := func(mod *ModuleJSON) { + addDep := func(mod *gocommand.ModuleJSON) { if mod.Replace == nil { // This is redundant with the cache, but we'll skip it cheaply enough. 
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) @@ -151,7 +162,7 @@ func (r *ModuleResolver) initAllMods() error { return err } for dec := json.NewDecoder(stdout); dec.More(); { - mod := &ModuleJSON{} + mod := &gocommand.ModuleJSON{} if err := dec.Decode(mod); err != nil { return err } @@ -197,7 +208,7 @@ func (r *ModuleResolver) ClearForNewMod() { // findPackage returns the module and directory that contains the package at // the given import path, or returns nil, "" if no module is in scope. -func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { +func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. for _, m := range r.modsByModPath { @@ -239,7 +250,7 @@ func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { // files in that directory. If not, it could be provided by an // outer module. See #29736. for _, fi := range pkgFiles { - if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok { + if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok { return m, pkgDir } } @@ -283,7 +294,7 @@ func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info // findModuleByDir returns the module that contains dir, or nil if no such // module is in scope. -func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { +func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { // This is quite tricky and may not be correct. dir could be: // - a package in the main module. // - a replace target underneath the main module's directory. @@ -310,7 +321,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { // dirIsNestedModule reports if dir is contained in a nested module underneath // mod, not actually in mod. 
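The module resolver now prefers GOMODCACHE and only falls back to the first GOPATH entry plus pkg/mod. A standalone sketch of that fallback; it reads the values from the OS environment for brevity, whereas the vendored code takes them from the `go env` map:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// moduleCacheDir mirrors the fallback above: prefer GOMODCACHE, otherwise the
// first GOPATH entry plus pkg/mod. GOPATH may be unset in the OS environment
// even when the go command would default it; the real resolver avoids that by
// asking `go env`.
func moduleCacheDir() (string, error) {
	if gmc := os.Getenv("GOMODCACHE"); gmc != "" {
		return gmc, nil
	}
	gopaths := filepath.SplitList(os.Getenv("GOPATH"))
	if len(gopaths) == 0 || gopaths[0] == "" {
		return "", fmt.Errorf("empty GOPATH")
	}
	return filepath.Join(gopaths[0], "pkg", "mod"), nil
}

func main() {
	dir, err := moduleCacheDir()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("module cache:", dir)
}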
-func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool { +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool { if !strings.HasPrefix(dir, mod.Dir) { return false } @@ -340,10 +351,11 @@ func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { } if r.dirInModuleCache(dir) { - matches := modCacheRegexp.FindStringSubmatch(dir) - index := strings.Index(dir, matches[1]+"@"+matches[2]) - modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) - return modDir, readModName(filepath.Join(modDir, "go.mod")) + if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + return modDir, readModName(filepath.Join(modDir, "go.mod")) + } } for { if info, ok := r.cacheLoad(dir); ok { @@ -482,7 +494,7 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error return nil } -func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { if _, ok := stdlib[path]; ok { return MaxRelevance } @@ -490,17 +502,31 @@ func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { return modRelevance(mod) } -func modRelevance(mod *ModuleJSON) int { +func modRelevance(mod *gocommand.ModuleJSON) float64 { + var relevance float64 switch { case mod == nil: // out of scope return MaxRelevance - 4 case mod.Indirect: - return MaxRelevance - 3 + relevance = MaxRelevance - 3 case !mod.Main: - return MaxRelevance - 2 + relevance = MaxRelevance - 2 default: - return MaxRelevance - 1 // main module ties with stdlib + relevance = MaxRelevance - 1 // main module ties with stdlib } + + _, versionString, ok := module.SplitPathVersion(mod.Path) + if ok { + index := strings.Index(versionString, "v") + if index == -1 { + return relevance + } + if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil { + relevance += versionNumber / 1000 + } + } + + return relevance } // canonicalize gets the result of canonicalizing the packages using the results @@ -656,63 +682,3 @@ func modulePath(mod []byte) string { } return "" // missing module path } - -var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) - -// vendorEnabled indicates if vendoring is enabled. -// Inspired by setDefaultBuildMod in modload/init.go -func vendorEnabled(env *ProcessEnv) (*ModuleJSON, bool, error) { - mainMod, go114, err := getMainModuleAnd114(env) - if err != nil { - return nil, false, err - } - matches := modFlagRegexp.FindStringSubmatch(env.GOFLAGS) - var modFlag string - if len(matches) != 0 { - modFlag = matches[1] - } - if modFlag != "" { - // Don't override an explicit '-mod=' argument. - return mainMod, modFlag == "vendor", nil - } - if mainMod == nil || !go114 { - return mainMod, false, nil - } - // Check 1.14's automatic vendor mode. - if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { - if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { - // The Go version is at least 1.14, and a vendor directory exists. - // Set -mod=vendor by default. - return mainMod, true, nil - } - } - return mainMod, false, nil -} - -// getMainModuleAnd114 gets the main module's information and whether the -// go command in use is 1.14+. This is the information needed to figure out -// if vendoring should be enabled. 
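scoreImportPath and modRelevance now return float64 so a small major-version bonus (versionNumber/1000) can break ties between otherwise equal modules, letting foo/v3 rank ahead of foo/v2 without crossing a tier. A standalone sketch of that scoring, using golang.org/x/mod/module.SplitPathVersion as the vendored code does; the module paths and the empty-path stand-in for the out-of-scope case are illustrative:

package main

import (
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/mod/module"
)

const maxRelevance = 7.0

// relevance mirrors modRelevance above: a base score by module kind plus a
// tiny major-version bonus. An empty path stands in for the "out of scope"
// (nil module) case.
func relevance(modPath string, main, indirect bool) float64 {
	var rel float64
	switch {
	case modPath == "":
		return maxRelevance - 4
	case indirect:
		rel = maxRelevance - 3
	case !main:
		rel = maxRelevance - 2
	default:
		rel = maxRelevance - 1
	}
	if _, pathMajor, ok := module.SplitPathVersion(modPath); ok {
		if i := strings.Index(pathMajor, "v"); i >= 0 {
			if v, err := strconv.ParseFloat(pathMajor[i+1:], 64); err == nil {
				rel += v / 1000
			}
		}
	}
	return rel
}

func main() {
	fmt.Println(relevance("example.com/lib", false, false))    // 5
	fmt.Println(relevance("example.com/lib/v2", false, false)) // 5.002
	fmt.Println(relevance("example.com/lib/v3", false, false)) // 5.003
}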
-func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) { - const format = `{{.Path}} -{{.Dir}} -{{.GoMod}} -{{.GoVersion}} -{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} -` - stdout, err := env.invokeGo(context.TODO(), "list", "-m", "-f", format) - if err != nil { - return nil, false, nil - } - lines := strings.Split(stdout.String(), "\n") - if len(lines) < 5 { - return nil, false, fmt.Errorf("unexpected stdout: %q", stdout) - } - mod := &ModuleJSON{ - Path: lines[0], - Dir: lines[1], - GoMod: lines[2], - GoVersion: lines[3], - Main: true, - } - return mod, lines[4] == "go1.14", nil -} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index 226279471d..be8ffa25fe 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -15,7 +15,7 @@ import ( // sortImports sorts runs of consecutive import lines in import blocks in f. // It also removes duplicate imports when it is possible to do so without data loss. -func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { for i, d := range f.Decls { d, ok := d.(*ast.GenDecl) if !ok || d.Tok != token.IMPORT { @@ -40,11 +40,11 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { for j, s := range d.Specs { if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { // j begins a new run. End this one. - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...) + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...) i = j } } - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...) + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...) d.Specs = specs // Deduping can leave a blank line before the rparen; clean that up. @@ -60,7 +60,7 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { // mergeImports merges all the import declarations into the first one. // Taken from golang.org/x/tools/ast/astutil. -func mergeImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func mergeImports(fset *token.FileSet, f *ast.File) { if len(f.Decls) <= 1 { return } @@ -142,7 +142,7 @@ type posSpan struct { End token.Pos } -func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { +func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { // Can't short-circuit here even if specs are already sorted, // since they might yet need deduplication. // A lone import, however, may be safely ignored. @@ -191,7 +191,7 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp // Reassign the import paths to have the same position sequence. // Reassign each comment to abut the end of its spec. // Sort the comments by new position. - sort.Sort(byImportSpec{env, specs}) + sort.Sort(byImportSpec{localPrefix, specs}) // Dedup. Thanks to our sorting, we can just consider // adjacent pairs of imports. 
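sortImports, sortSpecs, and byImportSpec now thread localPrefix through directly rather than a *ProcessEnv. From the outside, the same grouping is driven by the exported golang.org/x/tools/imports package and its package-level LocalPrefix; a short usage sketch of that public wrapper (not the internal API changed in these hunks):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte(`package demo

func Hello() string { return strings.ToUpper(fmt.Sprintf("%d", 42)) }
`)
	// LocalPrefix groups any example.com/demo/... imports after third-party ones.
	imports.LocalPrefix = "example.com/demo"
	out, err := imports.Process("demo.go", src, &imports.Options{
		Comments:  true,
		TabIndent: true,
		TabWidth:  8,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Prints the source with "fmt" and "strings" added and imports grouped.
	fmt.Print(string(out))
}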
@@ -245,8 +245,8 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp } type byImportSpec struct { - env *ProcessEnv - specs []ast.Spec // slice of *ast.ImportSpec + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec } func (x byImportSpec) Len() int { return len(x.specs) } @@ -255,8 +255,8 @@ func (x byImportSpec) Less(i, j int) bool { ipath := importPath(x.specs[i]) jpath := importPath(x.specs[j]) - igroup := importGroup(x.env, ipath) - jgroup := importGroup(x.env, jpath) + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) if igroup != jgroup { return igroup < jgroup } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 16252111ff..7b573b9830 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -56,6 +56,7 @@ var stdlib = map[string][]string{ }, "bufio": []string{ "ErrAdvanceTooFar", + "ErrBadReadCount", "ErrBufferFull", "ErrFinalToken", "ErrInvalidUnreadByte", @@ -303,7 +304,9 @@ var stdlib = map[string][]string{ "PrivateKey", "PublicKey", "Sign", + "SignASN1", "Verify", + "VerifyASN1", }, "crypto/ed25519": []string{ "GenerateKey", @@ -322,11 +325,13 @@ var stdlib = map[string][]string{ "CurveParams", "GenerateKey", "Marshal", + "MarshalCompressed", "P224", "P256", "P384", "P521", "Unmarshal", + "UnmarshalCompressed", }, "crypto/hmac": []string{ "Equal", @@ -432,6 +437,7 @@ var stdlib = map[string][]string{ "CurveP521", "Dial", "DialWithDialer", + "Dialer", "ECDSAWithP256AndSHA256", "ECDSAWithP384AndSHA384", "ECDSAWithP521AndSHA512", @@ -507,6 +513,7 @@ var stdlib = map[string][]string{ "ConstraintViolationError", "CreateCertificate", "CreateCertificateRequest", + "CreateRevocationList", "DSA", "DSAWithSHA1", "DSAWithSHA256", @@ -581,6 +588,7 @@ var stdlib = map[string][]string{ "PublicKeyAlgorithm", "PureEd25519", "RSA", + "RevocationList", "SHA1WithRSA", "SHA256WithRSA", "SHA256WithRSAPSS", @@ -694,6 +702,7 @@ var stdlib = map[string][]string{ "String", "Tx", "TxOptions", + "Validator", "Value", "ValueConverter", "Valuer", @@ -2349,6 +2358,27 @@ var stdlib = map[string][]string{ "IMAGE_DIRECTORY_ENTRY_RESOURCE", "IMAGE_DIRECTORY_ENTRY_SECURITY", "IMAGE_DIRECTORY_ENTRY_TLS", + "IMAGE_DLLCHARACTERISTICS_APPCONTAINER", + "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", + "IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", + "IMAGE_DLLCHARACTERISTICS_GUARD_CF", + "IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", + "IMAGE_DLLCHARACTERISTICS_NO_BIND", + "IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", + "IMAGE_DLLCHARACTERISTICS_NO_SEH", + "IMAGE_DLLCHARACTERISTICS_NX_COMPAT", + "IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", + "IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", + "IMAGE_FILE_32BIT_MACHINE", + "IMAGE_FILE_AGGRESIVE_WS_TRIM", + "IMAGE_FILE_BYTES_REVERSED_HI", + "IMAGE_FILE_BYTES_REVERSED_LO", + "IMAGE_FILE_DEBUG_STRIPPED", + "IMAGE_FILE_DLL", + "IMAGE_FILE_EXECUTABLE_IMAGE", + "IMAGE_FILE_LARGE_ADDRESS_AWARE", + "IMAGE_FILE_LINE_NUMS_STRIPPED", + "IMAGE_FILE_LOCAL_SYMS_STRIPPED", "IMAGE_FILE_MACHINE_AM33", "IMAGE_FILE_MACHINE_AMD64", "IMAGE_FILE_MACHINE_ARM", @@ -2371,6 +2401,25 @@ var stdlib = map[string][]string{ "IMAGE_FILE_MACHINE_THUMB", "IMAGE_FILE_MACHINE_UNKNOWN", "IMAGE_FILE_MACHINE_WCEMIPSV2", + "IMAGE_FILE_NET_RUN_FROM_SWAP", + "IMAGE_FILE_RELOCS_STRIPPED", + "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", + "IMAGE_FILE_SYSTEM", + "IMAGE_FILE_UP_SYSTEM_ONLY", + 
"IMAGE_SUBSYSTEM_EFI_APPLICATION", + "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", + "IMAGE_SUBSYSTEM_EFI_ROM", + "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", + "IMAGE_SUBSYSTEM_NATIVE", + "IMAGE_SUBSYSTEM_NATIVE_WINDOWS", + "IMAGE_SUBSYSTEM_OS2_CUI", + "IMAGE_SUBSYSTEM_POSIX_CUI", + "IMAGE_SUBSYSTEM_UNKNOWN", + "IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", + "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", + "IMAGE_SUBSYSTEM_WINDOWS_CUI", + "IMAGE_SUBSYSTEM_WINDOWS_GUI", + "IMAGE_SUBSYSTEM_XBOX", "ImportDirectory", "NewFile", "Open", @@ -4188,6 +4237,7 @@ var stdlib = map[string][]string{ "DevNull", "Environ", "ErrClosed", + "ErrDeadlineExceeded", "ErrExist", "ErrInvalid", "ErrNoDeadline", @@ -4646,6 +4696,7 @@ var stdlib = map[string][]string{ "ErrRange", "ErrSyntax", "FormatBool", + "FormatComplex", "FormatFloat", "FormatInt", "FormatUint", @@ -4655,6 +4706,7 @@ var stdlib = map[string][]string{ "Itoa", "NumError", "ParseBool", + "ParseComplex", "ParseFloat", "ParseInt", "ParseUint", diff --git a/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go b/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go new file mode 100644 index 0000000000..ac377035ec --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzzy + +import ( + "unicode" +) + +// RuneRole specifies the role of a rune in the context of an input. +type RuneRole byte + +const ( + // RNone specifies a rune without any role in the input (i.e., whitespace/non-ASCII). + RNone RuneRole = iota + // RSep specifies a rune with the role of segment separator. + RSep + // RTail specifies a rune which is a lower-case tail in a word in the input. + RTail + // RUCTail specifies a rune which is an upper-case tail in a word in the input. + RUCTail + // RHead specifies a rune which is the first character in a word in the input. + RHead +) + +// RuneRoles detects the roles of each byte rune in an input string and stores it in the output +// slice. The rune role depends on the input type. Stops when it parsed all the runes in the string +// or when it filled the output. If output is nil, then it gets created. +func RuneRoles(str string, reuse []RuneRole) []RuneRole { + var output []RuneRole + if cap(reuse) < len(str) { + output = make([]RuneRole, 0, len(str)) + } else { + output = reuse[:0] + } + + prev, prev2 := rtNone, rtNone + for i := 0; i < len(str); i++ { + r := rune(str[i]) + + role := RNone + + curr := rtLower + if str[i] <= unicode.MaxASCII { + curr = runeType(rt[str[i]] - '0') + } + + if curr == rtLower { + if prev == rtNone || prev == rtPunct { + role = RHead + } else { + role = RTail + } + } else if curr == rtUpper { + role = RHead + + if prev == rtUpper { + // This and previous characters are both upper case. + + if i+1 == len(str) { + // This is last character, previous was also uppercase -> this is UCTail + // i.e., (current char is C): aBC / BC / ABC + role = RUCTail + } + } + } else if curr == rtPunct { + switch r { + case '.', ':': + role = RSep + } + } + if curr != rtLower { + if i > 1 && output[i-1] == RHead && prev2 == rtUpper && (output[i-2] == RHead || output[i-2] == RUCTail) { + // The previous two characters were uppercase. The current one is not a lower case, so the + // previous one can't be a HEAD. Make it a UCTail. + // i.e., (last char is current char - B must be a UCTail): ABC / ZABC / AB. 
+ output[i-1] = RUCTail + } + } + + output = append(output, role) + prev2 = prev + prev = curr + } + return output +} + +type runeType byte + +const ( + rtNone runeType = iota + rtPunct + rtLower + rtUpper +) + +const rt = "00000000000000000000000000000000000000000000001122222222221000000333333333333333333333333330000002222222222222222222222222200000" + +// LastSegment returns the substring representing the last segment from the input, where each +// byte has an associated RuneRole in the roles slice. This makes sense only for inputs of Symbol +// or Filename type. +func LastSegment(input string, roles []RuneRole) string { + // Exclude ending separators. + end := len(input) - 1 + for end >= 0 && roles[end] == RSep { + end-- + } + if end < 0 { + return "" + } + + start := end - 1 + for start >= 0 && roles[start] != RSep { + start-- + } + + return input[start+1 : end+1] +} + +// ToLower transforms the input string to lower case, which is stored in the output byte slice. +// The lower casing considers only ASCII values - non ASCII values are left unmodified. +// Stops when parsed all input or when it filled the output slice. If output is nil, then it gets +// created. +func ToLower(input string, reuse []byte) []byte { + output := reuse + if cap(reuse) < len(input) { + output = make([]byte, len(input)) + } + + for i := 0; i < len(input); i++ { + r := rune(input[i]) + if r <= unicode.MaxASCII { + if 'A' <= r && r <= 'Z' { + r += 'a' - 'A' + } + } + output[i] = byte(r) + } + return output[:len(input)] +} + +// WordConsumer defines a consumer for a word delimited by the [start,end) byte offsets in an input +// (start is inclusive, end is exclusive). +type WordConsumer func(start, end int) + +// Words find word delimiters in an input based on its bytes' mappings to rune roles. The offset +// delimiters for each word are fed to the provided consumer function. +func Words(roles []RuneRole, consume WordConsumer) { + var wordStart int + for i, r := range roles { + switch r { + case RUCTail, RTail: + case RHead, RNone, RSep: + if i != wordStart { + consume(wordStart, i) + } + wordStart = i + if r != RHead { + // Skip this character. + wordStart = i + 1 + } + } + } + if wordStart != len(roles) { + consume(wordStart, len(roles)) + } +} diff --git a/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go b/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go new file mode 100644 index 0000000000..16a643097d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go @@ -0,0 +1,398 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fuzzy implements a fuzzy matching algorithm. +package fuzzy + +import ( + "bytes" + "fmt" +) + +const ( + // MaxInputSize is the maximum size of the input scored against the fuzzy matcher. Longer inputs + // will be truncated to this size. + MaxInputSize = 127 + // MaxPatternSize is the maximum size of the pattern used to construct the fuzzy matcher. Longer + // inputs are truncated to this size. + MaxPatternSize = 63 +) + +type scoreVal int + +func (s scoreVal) val() int { + return int(s) >> 1 +} + +func (s scoreVal) prevK() int { + return int(s) & 1 +} + +func score(val int, prevK int /*0 or 1*/) scoreVal { + return scoreVal(val<<1 + prevK) +} + +// Matcher implements a fuzzy matching algorithm for scoring candidates against a pattern. +// The matcher does not support parallel usage. 
+type Matcher struct { + pattern string + patternLower []byte // lower-case version of the pattern + patternShort []byte // first characters of the pattern + caseSensitive bool // set if the pattern is mix-cased + + patternRoles []RuneRole // the role of each character in the pattern + roles []RuneRole // the role of each character in the tested string + + scores [MaxInputSize + 1][MaxPatternSize + 1][2]scoreVal + + scoreScale float32 + + lastCandidateLen int // in bytes + lastCandidateMatched bool + + // Here we save the last candidate in lower-case. This is basically a byte slice we reuse for + // performance reasons, so the slice is not reallocated for every candidate. + lowerBuf [MaxInputSize]byte + rolesBuf [MaxInputSize]RuneRole +} + +func (m *Matcher) bestK(i, j int) int { + if m.scores[i][j][0].val() < m.scores[i][j][1].val() { + return 1 + } + return 0 +} + +// NewMatcher returns a new fuzzy matcher for scoring candidates against the provided pattern. +func NewMatcher(pattern string) *Matcher { + if len(pattern) > MaxPatternSize { + pattern = pattern[:MaxPatternSize] + } + + m := &Matcher{ + pattern: pattern, + patternLower: ToLower(pattern, nil), + } + + for i, c := range m.patternLower { + if pattern[i] != c { + m.caseSensitive = true + break + } + } + + if len(pattern) > 3 { + m.patternShort = m.patternLower[:3] + } else { + m.patternShort = m.patternLower + } + + m.patternRoles = RuneRoles(pattern, nil) + + if len(pattern) > 0 { + maxCharScore := 4 + m.scoreScale = 1 / float32(maxCharScore*len(pattern)) + } + + return m +} + +// Score returns the score returned by matching the candidate to the pattern. +// This is not designed for parallel use. Multiple candidates must be scored sequentially. +// Returns a score between 0 and 1 (0 - no match, 1 - perfect match). +func (m *Matcher) Score(candidate string) float32 { + if len(candidate) > MaxInputSize { + candidate = candidate[:MaxInputSize] + } + lower := ToLower(candidate, m.lowerBuf[:]) + m.lastCandidateLen = len(candidate) + + if len(m.pattern) == 0 { + // Empty patterns perfectly match candidates. + return 1 + } + + if m.match(candidate, lower) { + sc := m.computeScore(candidate, lower) + if sc > minScore/2 && !m.poorMatch() { + m.lastCandidateMatched = true + if len(m.pattern) == len(candidate) { + // Perfect match. + return 1 + } + + if sc < 0 { + sc = 0 + } + normalizedScore := float32(sc) * m.scoreScale + if normalizedScore > 1 { + normalizedScore = 1 + } + + return normalizedScore + } + } + + m.lastCandidateMatched = false + return 0 +} + +const minScore = -10000 + +// MatchedRanges returns matches ranges for the last scored string as a flattened array of +// [begin, end) byte offset pairs. +func (m *Matcher) MatchedRanges() []int { + if len(m.pattern) == 0 || !m.lastCandidateMatched { + return nil + } + i, j := m.lastCandidateLen, len(m.pattern) + if m.scores[i][j][0].val() < minScore/2 && m.scores[i][j][1].val() < minScore/2 { + return nil + } + + var ret []int + k := m.bestK(i, j) + for i > 0 { + take := (k == 1) + k = m.scores[i][j][k].prevK() + if take { + if len(ret) == 0 || ret[len(ret)-1] != i { + ret = append(ret, i) + ret = append(ret, i-1) + } else { + ret[len(ret)-1] = i - 1 + } + j-- + } + i-- + } + // Reverse slice. 
+ for i := 0; i < len(ret)/2; i++ { + ret[i], ret[len(ret)-1-i] = ret[len(ret)-1-i], ret[i] + } + return ret +} + +func (m *Matcher) match(candidate string, candidateLower []byte) bool { + i, j := 0, 0 + for ; i < len(candidateLower) && j < len(m.patternLower); i++ { + if candidateLower[i] == m.patternLower[j] { + j++ + } + } + if j != len(m.patternLower) { + return false + } + + // The input passes the simple test against pattern, so it is time to classify its characters. + // Character roles are used below to find the last segment. + m.roles = RuneRoles(candidate, m.rolesBuf[:]) + + return true +} + +func (m *Matcher) computeScore(candidate string, candidateLower []byte) int { + pattLen, candLen := len(m.pattern), len(candidate) + + for j := 0; j <= len(m.pattern); j++ { + m.scores[0][j][0] = minScore << 1 + m.scores[0][j][1] = minScore << 1 + } + m.scores[0][0][0] = score(0, 0) // Start with 0. + + segmentsLeft, lastSegStart := 1, 0 + for i := 0; i < candLen; i++ { + if m.roles[i] == RSep { + segmentsLeft++ + lastSegStart = i + 1 + } + } + + // A per-character bonus for a consecutive match. + consecutiveBonus := 2 + wordIdx := 0 // Word count within segment. + for i := 1; i <= candLen; i++ { + + role := m.roles[i-1] + isHead := role == RHead + + if isHead { + wordIdx++ + } else if role == RSep && segmentsLeft > 1 { + wordIdx = 0 + segmentsLeft-- + } + + var skipPenalty int + if i == 1 || (i-1) == lastSegStart { + // Skipping the start of first or last segment. + skipPenalty++ + } + + for j := 0; j <= pattLen; j++ { + // By default, we don't have a match. Fill in the skip data. + m.scores[i][j][1] = minScore << 1 + + // Compute the skip score. + k := 0 + if m.scores[i-1][j][0].val() < m.scores[i-1][j][1].val() { + k = 1 + } + + skipScore := m.scores[i-1][j][k].val() + // Do not penalize missing characters after the last matched segment. + if j != pattLen { + skipScore -= skipPenalty + } + m.scores[i][j][0] = score(skipScore, k) + + if j == 0 || candidateLower[i-1] != m.patternLower[j-1] { + // Not a match. + continue + } + pRole := m.patternRoles[j-1] + + if role == RTail && pRole == RHead { + if j > 1 { + // Not a match: a head in the pattern matches a tail character in the candidate. + continue + } + // Special treatment for the first character of the pattern. We allow + // matches in the middle of a word if they are long enough, at least + // min(3, pattern.length) characters. + if !bytes.HasPrefix(candidateLower[i-1:], m.patternShort) { + continue + } + } + + // Compute the char score. + var charScore int + // Bonus 1: the char is in the candidate's last segment. + if segmentsLeft <= 1 { + charScore++ + } + // Bonus 2: Case match or a Head in the pattern aligns with one in the word. + // Single-case patterns lack segmentation signals and we assume any character + // can be a head of a segment. + if candidate[i-1] == m.pattern[j-1] || role == RHead && (!m.caseSensitive || pRole == RHead) { + charScore++ + } + + // Penalty 1: pattern char is Head, candidate char is Tail. + if role == RTail && pRole == RHead { + charScore-- + } + // Penalty 2: first pattern character matched in the middle of a word. + if j == 1 && role == RTail { + charScore -= 4 + } + + // Third dimension encodes whether there is a gap between the previous match and the current + // one. + for k := 0; k < 2; k++ { + sc := m.scores[i-1][j-1][k].val() + charScore + + isConsecutive := k == 1 || i-1 == 0 || i-1 == lastSegStart + if isConsecutive { + // Bonus 3: a consecutive match. 
First character match also gets a bonus to + // ensure prefix final match score normalizes to 1.0. + // Logically, this is a part of charScore, but we have to compute it here because it + // only applies for consecutive matches (k == 1). + sc += consecutiveBonus + } + if k == 0 { + // Penalty 3: Matching inside a segment (and previous char wasn't matched). Penalize for the lack + // of alignment. + if role == RTail || role == RUCTail { + sc -= 3 + } + } + + if sc > m.scores[i][j][1].val() { + m.scores[i][j][1] = score(sc, k) + } + } + } + } + + result := m.scores[len(candidate)][len(m.pattern)][m.bestK(len(candidate), len(m.pattern))].val() + + return result +} + +// ScoreTable returns the score table computed for the provided candidate. Used only for debugging. +func (m *Matcher) ScoreTable(candidate string) string { + var buf bytes.Buffer + + var line1, line2, separator bytes.Buffer + line1.WriteString("\t") + line2.WriteString("\t") + for j := 0; j < len(m.pattern); j++ { + line1.WriteString(fmt.Sprintf("%c\t\t", m.pattern[j])) + separator.WriteString("----------------") + } + + buf.WriteString(line1.String()) + buf.WriteString("\n") + buf.WriteString(separator.String()) + buf.WriteString("\n") + + for i := 1; i <= len(candidate); i++ { + line1.Reset() + line2.Reset() + + line1.WriteString(fmt.Sprintf("%c\t", candidate[i-1])) + line2.WriteString("\t") + + for j := 1; j <= len(m.pattern); j++ { + line1.WriteString(fmt.Sprintf("M%6d(%c)\t", m.scores[i][j][0].val(), dir(m.scores[i][j][0].prevK()))) + line2.WriteString(fmt.Sprintf("H%6d(%c)\t", m.scores[i][j][1].val(), dir(m.scores[i][j][1].prevK()))) + } + buf.WriteString(line1.String()) + buf.WriteString("\n") + buf.WriteString(line2.String()) + buf.WriteString("\n") + buf.WriteString(separator.String()) + buf.WriteString("\n") + } + + return buf.String() +} + +func dir(prevK int) rune { + if prevK == 0 { + return 'M' + } + return 'H' +} + +func (m *Matcher) poorMatch() bool { + if len(m.pattern) < 2 { + return false + } + + i, j := m.lastCandidateLen, len(m.pattern) + k := m.bestK(i, j) + + var counter, len int + for i > 0 { + take := (k == 1) + k = m.scores[i][j][k].prevK() + if take { + len++ + if k == 0 && len < 3 && m.roles[i-1] == RTail { + // Short match in the middle of a word + counter++ + if counter > 1 { + return true + } + } + j-- + } else { + len = 0 + } + i-- + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index ff5a6b17d3..1335a5eed8 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -10,3 +10,8 @@ var GetForTest = func(p interface{}) string { return "" } var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 0000000000..65473eb226 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1358 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". + // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. + // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. + // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. + // + // Example: + // import "fmt" + // + // func main() {} + UnusedImport + + /* initialization */ + + // InvalidInitCycle occurs when an invalid cycle is detected within the + // initialization graph. + // + // Example: + // var x int = f() + // + // func f() int { return x } + InvalidInitCycle + + /* decls */ + + // DuplicateDecl occurs when an identifier is declared multiple times. + // + // Example: + // var x = 1 + // var x = 2 + DuplicateDecl + + // InvalidDeclCycle occurs when a declaration cycle is not valid. + // + // Example: + // import "unsafe" + // + // type T struct { + // a [n]int + // } + // + // var n = unsafe.Sizeof(T{}) + InvalidDeclCycle + + // InvalidTypeCycle occurs when a cycle in type definitions results in a + // type that is not well-defined. 
+ // + // Example: + // import "unsafe" + // + // type T [unsafe.Sizeof(T{})]int + InvalidTypeCycle + + /* decls > const */ + + // InvalidConstInit occurs when a const declaration has a non-constant + // initializer. + // + // Example: + // var x int + // const _ = x + InvalidConstInit + + // InvalidConstVal occurs when a const value cannot be converted to its + // target type. + // + // TODO(findleyr): this error code and example are not very clear. Consider + // removing it. + // + // Example: + // const _ = 1 << "hello" + InvalidConstVal + + // InvalidConstType occurs when the underlying type in a const declaration + // is not a valid constant type. + // + // Example: + // const c *int = 4 + InvalidConstType + + /* decls > var (+ other variable assignment codes) */ + + // UntypedNil occurs when the predeclared (untyped) value nil is used to + // initialize a variable declared without an explicit type. + // + // Example: + // var x = nil + UntypedNil + + // WrongAssignCount occurs when the number of values on the right-hand side + // of an assignment or or initialization expression does not match the number + // of variables on the left-hand side. + // + // Example: + // var x = 1, 2 + WrongAssignCount + + // UnassignableOperand occurs when the left-hand side of an assignment is + // not assignable. + // + // Example: + // func f() { + // const c = 1 + // c = 2 + // } + UnassignableOperand + + // NoNewVar occurs when a short variable declaration (':=') does not declare + // new variables. + // + // Example: + // func f() { + // x := 1 + // x := 2 + // } + NoNewVar + + // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does + // not have single-valued left-hand or right-hand side. + // + // Per the spec: + // "In assignment operations, both the left- and right-hand expression lists + // must contain exactly one single-valued expression" + // + // Example: + // func f() int { + // x, y := 1, 2 + // x, y += 1 + // return x + y + // } + MultiValAssignOp + + // InvalidIfaceAssign occurs when a value of type T is used as an + // interface, but T does not implement a method of the expected interface. + // + // Example: + // type I interface { + // f() + // } + // + // type T int + // + // var x I = T(1) + InvalidIfaceAssign + + // InvalidChanAssign occurs when a chan assignment is invalid. + // + // Per the spec, a value x is assignable to a channel type T if: + // "x is a bidirectional channel value, T is a channel type, x's type V and + // T have identical element types, and at least one of V or T is not a + // defined type." + // + // Example: + // type T1 chan int + // type T2 chan int + // + // var x T1 + // // Invalid assignment because both types are named + // var _ T2 = x + InvalidChanAssign + + // IncompatibleAssign occurs when the type of the right-hand side expression + // in an assignment cannot be assigned to the type of the variable being + // assigned. + // + // Example: + // var x []int + // var _ int = x + IncompatibleAssign + + // UnaddressableFieldAssign occurs when trying to assign to a struct field + // in a map value. + // + // Example: + // func f() { + // m := make(map[string]struct{i int}) + // m["foo"].i = 42 + // } + UnaddressableFieldAssign + + /* decls > type (+ other type expression codes) */ + + // NotAType occurs when the identifier used as the underlying type in a type + // declaration or the right-hand side of a type alias does not denote a type. 
+ // + // Example: + // var S = 2 + // + // type T S + NotAType + + // InvalidArrayLen occurs when an array length is not a constant value. + // + // Example: + // var n = 3 + // var _ = [n]int{} + InvalidArrayLen + + // BlankIfaceMethod occurs when a method name is '_'. + // + // Per the spec: + // "The name of each explicitly specified method must be unique and not + // blank." + // + // Example: + // type T interface { + // _(int) + // } + BlankIfaceMethod + + // IncomparableMapKey occurs when a map key type does not support the == and + // != operators. + // + // Per the spec: + // "The comparison operators == and != must be fully defined for operands of + // the key type; thus the key type must not be a function, map, or slice." + // + // Example: + // var x map[T]int + // + // type T []int + IncomparableMapKey + + // InvalidIfaceEmbed occurs when a non-interface type is embedded in an + // interface. + // + // Example: + // type T struct {} + // + // func (T) m() + // + // type I interface { + // T + // } + InvalidIfaceEmbed + + // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T, + // and T itself is itself a pointer, an unsafe.Pointer, or an interface. + // + // Per the spec: + // "An embedded field must be specified as a type name T or as a pointer to + // a non-interface type name *T, and T itself may not be a pointer type." + // + // Example: + // type T *int + // + // type S struct { + // *T + // } + InvalidPtrEmbed + + /* decls > func and method */ + + // BadRecv occurs when a method declaration does not have exactly one + // receiver parameter. + // + // Example: + // func () _() {} + BadRecv + + // InvalidRecv occurs when a receiver type expression is not of the form T + // or *T, or T is a pointer type. + // + // Example: + // type T struct {} + // + // func (**T) m() {} + InvalidRecv + + // DuplicateFieldAndMethod occurs when an identifier appears as both a field + // and method name. + // + // Example: + // type T struct { + // m int + // } + // + // func (T) m() {} + DuplicateFieldAndMethod + + // DuplicateMethod occurs when two methods on the same receiver type have + // the same name. + // + // Example: + // type T struct {} + // func (T) m() {} + // func (T) m(i int) int { return i } + DuplicateMethod + + /* decls > special */ + + // InvalidBlank occurs when a blank identifier is used as a value or type. + // + // Per the spec: + // "The blank identifier may appear as an operand only on the left-hand side + // of an assignment." + // + // Example: + // var x = _ + InvalidBlank + + // InvalidIota occurs when the predeclared identifier iota is used outside + // of a constant declaration. + // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. + // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. 
+ // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. + // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. + // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. + // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. + // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. + // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. + // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. + // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. 
+ // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. + // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. + // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. + UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. + // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. + // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. + // + // Example: + // type E1 struct { i int } + // type E2 struct { i int } + // type T struct { E1; E2 } + // + // var x T + // var _ = x.i + AmbiguousSelector + + // UndeclaredImportedName occurs when a package-qualified identifier is + // undeclared by the imported package. + // + // Example: + // import "go/types" + // + // var _ = types.NotAnActualIdentifier + UndeclaredImportedName + + // UnexportedName occurs when a selector refers to an unexported identifier + // of an imported package. + // + // Example: + // import "reflect" + // + // type _ reflect.flag + UnexportedName + + // UndeclaredName occurs when an identifier is not declared in the current + // scope. + // + // Example: + // var x T + UndeclaredName + + // MissingFieldOrMethod occurs when a selector references a field or method + // that does not exist. + // + // Example: + // type T struct {} + // + // var x = T{}.f + MissingFieldOrMethod + + /* exprs > ... */ + + // BadDotDotDotSyntax occurs when a "..." 
occurs in a context where it is + // not valid. + // + // Example: + // var _ = map[int][...]int{0: {}} + BadDotDotDotSyntax + + // NonVariadicDotDotDot occurs when a "..." is used on the final argument to + // a non-variadic function. + // + // Example: + // func printArgs(s []string) { + // for _, a := range s { + // println(a) + // } + // } + // + // func f() { + // s := []string{"a", "b", "c"} + // printArgs(s...) + // } + NonVariadicDotDotDot + + // MisplacedDotDotDot occurs when a "..." is used somewhere other than the + // final argument to a function call. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := []int{1,2,3} + // printArgs(0, a...) + // } + MisplacedDotDotDot + + // InvalidDotDotDotOperand occurs when a "..." operator is applied to a + // single-valued operand. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := 1 + // printArgs(a...) + // } + // + // Example: + // func args() (int, int) { + // return 1, 2 + // } + // + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func g() { + // printArgs(args()...) + // } + InvalidDotDotDotOperand + + // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in + // function. + // + // Example: + // var s = []int{1, 2, 3} + // var l = len(s...) + InvalidDotDotDot + + /* exprs > built-in */ + + // UncalledBuiltin occurs when a built-in function is used as a + // function-valued expression, instead of being called. + // + // Per the spec: + // "The built-in functions do not have standard Go types, so they can only + // appear in call expressions; they cannot be used as function values." + // + // Example: + // var _ = copy + UncalledBuiltin + + // InvalidAppend occurs when append is called with a first argument that is + // not a slice. + // + // Example: + // var _ = append(1, 2) + InvalidAppend + + // InvalidCap occurs when an argument to the cap built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Lengthand_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = cap(s) + InvalidCap + + // InvalidClose occurs when close(...) is called with an argument that is + // not of channel type, or that is a receive-only channel. + // + // Example: + // func f() { + // var x int + // close(x) + // } + InvalidClose + + // InvalidCopy occurs when the arguments are not of slice type or do not + // have compatible type. + // + // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // information on the type requirements for the copy built-in. + // + // Example: + // func f() { + // var x []int + // y := []int64{1,2,3} + // copy(x, y) + // } + InvalidCopy + + // InvalidComplex occurs when the complex built-in function is called with + // arguments with incompatible types. + // + // Example: + // var _ = complex(float32(1), float64(2)) + InvalidComplex + + // InvalidDelete occurs when the delete built-in function is called with a + // first argument that is not a map. + // + // Example: + // func f() { + // m := "hello" + // delete(m, "e") + // } + InvalidDelete + + // InvalidImag occurs when the imag built-in function is called with an + // argument that does not have complex type. 
+ // + // Example: + // var _ = imag(int(1)) + InvalidImag + + // InvalidLen occurs when an argument to the len built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Lengthand_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = len(s) + InvalidLen + + // SwappedMakeArgs occurs when make is called with three arguments, and its + // length argument is larger than its capacity argument. + // + // Example: + // var x = make([]int, 3, 2) + SwappedMakeArgs + + // InvalidMake occurs when make is called with an unsupported type argument. + // + // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // information on the types that may be created using make. + // + // Example: + // var x = make(int) + InvalidMake + + // InvalidReal occurs when the real built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = real(int(1)) + InvalidReal + + /* exprs > assertion */ + + // InvalidAssert occurs when a type assertion is applied to a + // value that is not of interface type. + // + // Example: + // var x = 1 + // var _ = x.(float64) + InvalidAssert + + // ImpossibleAssert occurs for a type assertion x.(T) when the value x of + // interface cannot have dynamic type T, due to a missing or mismatching + // method on T. + // + // Example: + // type T int + // + // func (t *T) m() int { return int(*t) } + // + // type I interface { m() int } + // + // var x I + // var _ = x.(T) + ImpossibleAssert + + /* exprs > conversion */ + + // InvalidConversion occurs when the argument type cannot be converted to the + // target. + // + // See https://golang.org/ref/spec#Conversions for the rules of + // convertibility. + // + // Example: + // var x float64 + // var _ = string(x) + InvalidConversion + + // InvalidUntypedConversion occurs when an there is no valid implicit + // conversion from an untyped value satisfying the type constraints of the + // context in which it is used. + // + // Example: + // var _ = 1 + "" + InvalidUntypedConversion + + /* offsetof */ + + // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument + // that is not a selector expression. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Offsetof(x) + BadOffsetofSyntax + + // InvalidOffsetof occurs when unsafe.Offsetof is called with a method + // selector, rather than a field selector, or when the field is embedded via + // a pointer. + // + // Per the spec: + // + // "If f is an embedded field, it must be reachable without pointer + // indirections through fields of the struct. " + // + // Example: + // import "unsafe" + // + // type T struct { f int } + // type S struct { *T } + // var s S + // var _ = unsafe.Offsetof(s.f) + // + // Example: + // import "unsafe" + // + // type S struct{} + // + // func (S) m() {} + // + // var s S + // var _ = unsafe.Offsetof(s.m) + InvalidOffsetof + + /* control flow > scope */ + + // UnusedExpr occurs when a side-effect free expression is used as a + // statement. Such a statement has no effect. + // + // Example: + // func f(i int) { + // i*i + // } + UnusedExpr + + // UnusedVar occurs when a variable is declared but unused. + // + // Example: + // func f() { + // x := 1 + // } + UnusedVar + + // MissingReturn occurs when a function with results is missing a return + // statement. 
+ // + // Example: + // func f() int {} + MissingReturn + + // WrongResultCount occurs when a return statement returns an incorrect + // number of values. + // + // Example: + // func ReturnOne() int { + // return 1, 2 + // } + WrongResultCount + + // OutOfScopeResult occurs when the name of a value implicitly returned by + // an empty return statement is shadowed in a nested scope. + // + // Example: + // func factor(n int) (i int) { + // for i := 2; i < n; i++ { + // if n%i == 0 { + // return + // } + // } + // return 0 + // } + OutOfScopeResult + + /* control flow > if */ + + // InvalidCond occurs when an if condition is not a boolean expression. + // + // Example: + // func checkReturn(i int) { + // if i { + // panic("non-zero return") + // } + // } + InvalidCond + + /* control flow > for */ + + // InvalidPostDecl occurs when there is a declaration in a for-loop post + // statement. + // + // Example: + // func f() { + // for i := 0; i < 10; j := 0 {} + // } + InvalidPostDecl + + // InvalidChanRange occurs when a send-only channel used in a range + // expression. + // + // Example: + // func sum(c chan<- int) { + // s := 0 + // for i := range c { + // s += i + // } + // } + InvalidChanRange + + // InvalidIterVar occurs when two iteration variables are used while ranging + // over a channel. + // + // Example: + // func f(c chan int) { + // for k, v := range c { + // println(k, v) + // } + // } + InvalidIterVar + + // InvalidRangeExpr occurs when the type of a range expression is not array, + // slice, string, map, or channel. + // + // Example: + // func f(i int) { + // for j := range i { + // println(j) + // } + // } + InvalidRangeExpr + + /* control flow > switch */ + + // MisplacedBreak occurs when a break statement is not within a for, switch, + // or select statement of the innermost function definition. + // + // Example: + // func f() { + // break + // } + MisplacedBreak + + // MisplacedContinue occurs when a continue statement is not within a for + // loop of the innermost function definition. + // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. 
+ // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. + // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. + // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. + // + // Example: + // type T struct {} + // + // func (*T) m() int { return 1 } + // + // var _ = T.m(T{}) + InvalidMethodExpr + + // WrongArgCount occurs when too few or too many arguments are passed by a + // function call. + // + // Example: + // func f(i int) {} + // var x = f() + WrongArgCount + + // InvalidCall occurs when an expression is called that is not of function + // type. + // + // Example: + // var x = "x" + // var y = x() + InvalidCall + + /* control flow > suspended */ + + // UnusedResults occurs when a restricted expression-only built-in function + // is suspended via go or defer. Such a suspension discards the results of + // these side-effect free built-in functions, and therefore is ineffectual. + // + // Example: + // func f(a []int) int { + // defer len(a) + // return i + // } + UnusedResults + + // InvalidDefer occurs when a deferred expression is not a function call, + // for example if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // defer int32(i) + // return i + // } + InvalidDefer + + // InvalidGo occurs when a go expression is not a function call, for example + // if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // go int32(i) + // return i + // } + InvalidGo +) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 0000000000..97f3ec891f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,152 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. 
+ // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNil-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ = x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidSelectCase-117] + _ = x[UndeclaredLabel-118] + _ = x[DuplicateLabel-119] + _ = x[MisplacedLabel-120] + _ = x[UnusedLabel-121] + _ = x[JumpOverDecl-122] + _ = x[JumpIntoBlock-123] + _ = x[InvalidMethodExpr-124] + _ = 
x[WrongArgCount-125] + _ = x[InvalidCall-126] + _ = x[UnusedResults-127] + _ = x[InvalidDefer-128] + _ = x[InvalidGo-129] +} + +const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGo" + +var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1747, 1761, 1775, 1786, 1798, 1811, 1828, 1841, 1852, 1865, 1877, 1886} + +func (i ErrorCode) String() string { + i -= 1 + if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) { + return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]] +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 0000000000..c3e1a397db --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/token" + "go/types" + "reflect" + "unsafe" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +func ReadGo116ErrorData(terr types.Error) (ErrorCode, token.Pos, token.Pos, bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. + v := reflect.ValueOf(terr) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 63e6b27e30..457c8e96dc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1306,7 +1306,7 @@ golang.org/x/crypto/ssh/terminal # golang.org/x/lint v0.0.0-20200302205851-738671d3881b golang.org/x/lint golang.org/x/lint/golint -# golang.org/x/mod v0.2.0 +# golang.org/x/mod v0.4.0 golang.org/x/mod/module golang.org/x/mod/semver # golang.org/x/net v0.0.0-20201110031124-69a78807bb2b @@ -1336,7 +1336,7 @@ golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a +# golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 golang.org/x/sync/errgroup golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c @@ -1369,7 +1369,7 @@ golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm # golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20200521155704-91d71f6c2f04 +# golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c ## explicit golang.org/x/tools/cmd/goimports golang.org/x/tools/go/analysis @@ -1394,8 +1394,10 @@ golang.org/x/tools/internal/fastwalk golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports +golang.org/x/tools/internal/lsp/fuzzy golang.org/x/tools/internal/packagesinternal -# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +golang.org/x/tools/internal/typesinternal +# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal # google.golang.org/api v0.29.0 @@ -1762,6 +1764,11 @@ k8s.io/utils/integer ## explicit layeh.com/radius layeh.com/radius/rfc2865 +# mvdan.cc/gofumpt v0.1.1 +## explicit +mvdan.cc/gofumpt +mvdan.cc/gofumpt/format +mvdan.cc/gofumpt/internal/diff # sigs.k8s.io/structured-merge-diff/v3 v3.0.0 sigs.k8s.io/structured-merge-diff/v3/value # sigs.k8s.io/yaml v1.2.0 diff --git a/vendor/mvdan.cc/gofumpt/.gitattributes b/vendor/mvdan.cc/gofumpt/.gitattributes new file mode 100644 index 0000000000..6f95229927 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/.gitattributes @@ -0,0 +1,2 @@ +# To prevent CRLF breakages on Windows for fragile files, like testdata. 
+* -text diff --git a/vendor/mvdan.cc/gofumpt/CHANGELOG.md b/vendor/mvdan.cc/gofumpt/CHANGELOG.md new file mode 100644 index 0000000000..7282e45977 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/CHANGELOG.md @@ -0,0 +1,35 @@ +# Changelog + +## [0.1.1] - 2021-03-11 + +This bugfix release backports fixes for a few issues: + +* Keep leading empty lines in func bodies if they help readability + +* Avoid breaking comment alignment on empty field lists + +* Add support for `//go-sumtype:` directives + +## [0.1.0] - 2021-01-05 + +This is gofumpt's first release, based on Go 1.15.x. It solidifies the features +which have worked well for over a year. + +This release will be the last to include `gofumports`, the fork of `goimports` +which applies `gofumpt`'s rules on top of updating the Go import lines. Users +who were relying on `goimports` in their editors or IDEs to apply both `gofumpt` +and `goimports` in a single step should switch to gopls, the official Go +language server. It is supported by many popular editors such as VS Code and +Vim, and already bundles gofumpt support. Instructions are available [in the +README](https://github.com/mvdan/gofumpt). + +`gofumports` also added maintenance work and potential confusion to end users. +In the future, there will only be one way to use `gofumpt` from the command +line. We also have a [Go API](https://pkg.go.dev/mvdan.cc/gofumpt/format) for +those building programs with gofumpt. + +Finally, this release adds the `-version` flag, to print the tool's own version. +The flag will work for "master" builds too. + +[0.1.1]: https://github.com/mvdan/gofumpt/releases/tag/v0.1.1 +[0.1.0]: https://github.com/mvdan/gofumpt/releases/tag/v0.1.0 diff --git a/vendor/mvdan.cc/gofumpt/LICENSE b/vendor/mvdan.cc/gofumpt/LICENSE new file mode 100644 index 0000000000..03e3bfc00c --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2019, Daniel Martí. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/mvdan.cc/gofumpt/LICENSE.google b/vendor/mvdan.cc/gofumpt/LICENSE.google new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/LICENSE.google @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/mvdan.cc/gofumpt/README.md b/vendor/mvdan.cc/gofumpt/README.md new file mode 100644 index 0000000000..7f5dcf1885 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/README.md @@ -0,0 +1,417 @@ +# gofumpt + + GO111MODULE=on go get mvdan.cc/gofumpt + +Enforce a stricter format than `gofmt`, while being backwards compatible. That +is, `gofumpt` is happy with a subset of the formats that `gofmt` is happy with. + +The tool is a modified fork of `gofmt`, so it can be used as a drop-in +replacement. Running `gofmt` after `gofumpt` should be a no-op. + +Most of the Go source files in this repository belong to the Go project. +The added formatting rules are in the `format` package. + +### Added rules + +No empty lines at the beginning or end of a function + +
example + +``` +func foo() { + println("bar") + +} +``` + +``` +func foo() { + println("bar") +} +``` + +
+ +No empty lines around a lone statement (or comment) in a block + +
example + +``` +if err != nil { + + return err +} +``` + +``` +if err != nil { + return err +} +``` + +
+ +No empty lines before a simple error check + +
example + +``` +foo, err := processFoo() + +if err != nil { + return err +} +``` + +``` +foo, err := processFoo() +if err != nil { + return err +} +``` + +
+ +Composite literals should use newlines consistently + +
example + +``` +// A newline before or after an element requires newlines for the opening and +// closing braces. +var ints = []int{1, 2, + 3, 4} + +// A newline between consecutive elements requires a newline between all +// elements. +var matrix = [][]int{ + {1}, + {2}, { + 3, + }, +} +``` + +``` +var ints = []int{ + 1, 2, + 3, 4, +} + +var matrix = [][]int{ + {1}, + {2}, + { + 3, + }, +} +``` + +
+ +Empty field lists should use a single line + +
example + +``` +var V interface { +} = 3 + +type T struct { +} + +func F( +) +``` + +``` +var V interface{} = 3 + +type T struct{} + +func F() +``` + 
+ +`std` imports must be in a separate group at the top + +
example + +``` +import ( + "foo.com/bar" + + "io" + + "io/ioutil" +) +``` + +``` +import ( + "io" + "io/ioutil" + + "foo.com/bar" +) +``` + +
+ +Short case clauses should take a single line + +
example + +``` +switch c { +case 'a', 'b', + 'c', 'd': +} +``` + +``` +switch c { +case 'a', 'b', 'c', 'd': +} +``` + +
+ +Multiline top-level declarations must be separated by empty lines + +
example + +``` +func foo() { + println("multiline foo") +} +func bar() { + println("multiline bar") +} +``` + +``` +func foo() { + println("multiline foo") +} + +func bar() { + println("multiline bar") +} +``` + +
+ +Single var declarations should not be grouped with parentheses + +
example + +``` +var ( + foo = "bar" +) +``` + +``` +var foo = "bar" +``` + +
+ +Contiguous top-level declarations should be grouped together + +
example + +``` +var nicer = "x" +var with = "y" +var alignment = "z" +``` + +``` +var ( + nicer = "x" + with = "y" + alignment = "z" +) +``` + +
+ + +Simple var-declaration statements should use short assignments + +
example + +``` +var s = "somestring" +``` + +``` +s := "somestring" +``` + +
+ + +The `-s` code simplification flag is enabled by default + +
example + +``` +var _ = [][]int{[]int{1}} +``` + +``` +var _ = [][]int{{1}} +``` + +
+ + +Octal integer literals should use the `0o` prefix on modules using Go 1.13 and later + +
example + +``` +const perm = 0755 +``` + +``` +const perm = 0o755 +``` + +
+ +Comments which aren't Go directives should start with a whitespace + +
example + +``` +//go:noinline + +//Foo is awesome. +func Foo() {} +``` + +``` +//go:noinline + +// Foo is awesome. +func Foo() {} +``` + +
+ +#### Extra rules behind `-extra` + +Adjacent parameters with the same type should be grouped together + +
example + +``` +func Foo(bar string, baz string) {} +``` + +``` +func Foo(bar, baz string) {} +``` + +
+ +### Installation + +`gofumpt` is a replacement for `gofmt`, so you can simply `go get` it as +described at the top of this README and use it. + +When using an IDE or editor with Go integrations, it's best to use `gofumpt` as +part of `gopls`. The instructions below show how to do that for some of the +major editors out there. + +#### Visual Studio Code + +Enable the language server following [the official docs](https://github.com/golang/tools/blob/master/gopls/doc/vscode.md), +and then enable gopls's `gofumpt` option. Note that VS Code will complain about +the `gopls` settings, but they will still work. + +```json +"go.useLanguageServer": true, +"gopls": { + "gofumpt": true, +}, +``` + +#### GoLand + +Once `gofumpt` is installed, follow the steps below: + +- Open **Settings** (File > Settings) +- Open the **Tools** section +- Find the *File Watchers* sub-section +- Click on the `+` on the right side to add a new file watcher +- Choose *Custom Template* + +When a window asks for settings, you can enter the following: + +* File Types: Select all .go files +* Scope: Project Files +* Program: Select your `gofumpt` executable +* Arguments: `-w $FilePath$` +* Output path to refresh: `$FilePath$` +* Working directory: `$ProjectFileDir$` +* Environment variables: `GOROOT=$GOROOT$;GOPATH=$GOPATH$;PATH=$GoBinDirs$` + +To avoid unnecessary runs, you should disable all checkboxes in the *Advanced* section. + +#### Vim-go + +Ensure you are at least running version +[v1.24](https://github.com/fatih/vim-go/blob/master/CHANGELOG.md#v124---september-15-2020), +and set up `gopls` for formatting code with `gofumpt`: + +```vim +let g:go_fmt_command="gopls" +let g:go_gopls_gofumpt=1 +``` + +#### Govim + +With a [new enough version of govim](https://github.com/govim/govim/pull/1005), +simply configure `gopls` to use `gofumpt`: + +```vim +call govim#config#Set("Gofumpt", 1) +``` + +### Roadmap + +This tool is a place to experiment. In the long term, the features that work +well might be proposed for `gofmt` itself. + +The tool is also compatible with `gofmt` and is aimed to be stable, so you can +rely on it for your code as long as you pin a version of it. + +### License + +Note that much of the code is copied from Go's `gofmt` and `goimports` commands. +You can tell which files originate from the Go repository from their copyright +headers. Their license file is `LICENSE.google`. + +`gofumpt`'s original source files are also under the 3-clause BSD license, with +the separate file `LICENSE`. diff --git a/vendor/mvdan.cc/gofumpt/doc.go b/vendor/mvdan.cc/gofumpt/doc.go new file mode 100644 index 0000000000..2f0f4d0794 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/doc.go @@ -0,0 +1,14 @@ +// Copyright (c) 2019, Daniel Martí +// See LICENSE for licensing information + +package main + +// First, sync the files with x/tools and GOROOT. +//go:generate go run gen.go + +// Then, add the missing imports to our added code. +//go:generate goimports -w . + +// Finally, ensure all code follows 'gofumpt -s'. Use the current source, to not +// need an extra 'go install' step. +//go:generate go run . -s -w . 
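The CHANGELOG and README above point to gofumpt's Go API in the `format` package, vendored further down in this patch as `vendor/mvdan.cc/gofumpt/format/format.go`. A minimal sketch of how a caller might use that API, relying only on the exported `Source` function and `Options` struct that appear in this patch (the file path, error handling, and version value below are illustrative assumptions, not part of the change):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"mvdan.cc/gofumpt/format"
)

func main() {
	// Read some Go source to reformat; "main.go" is only a placeholder path.
	src, err := ioutil.ReadFile("main.go")
	if err != nil {
		log.Fatal(err)
	}

	// Source applies gofumpt's rules on top of standard gofmt formatting.
	// LangVersion is the module's Go version (e.g. "1.14"); leaving it empty
	// keeps the conservative default, and ExtraRules opts into the rules
	// documented behind the -extra flag.
	out, err := format.Source(src, format.Options{
		LangVersion: "1.14",
		ExtraRules:  false,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}
```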
diff --git a/vendor/mvdan.cc/gofumpt/flag.go b/vendor/mvdan.cc/gofumpt/flag.go new file mode 100644 index 0000000000..17c64361d3 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/flag.go @@ -0,0 +1,16 @@ +// Copyright (c) 2019, Daniel Martí +// See LICENSE for licensing information + +package main + +import "flag" + +var ( + langVersion = flag.String("lang", "", "target Go version in the form 1.X (default from go.mod)") + extraRules = flag.Bool("extra", false, "enable extra rules which should be vetted by a human") +) + +func init() { + // make -s default to true + *simplifyAST = true +} diff --git a/vendor/mvdan.cc/gofumpt/format/format.go b/vendor/mvdan.cc/gofumpt/format/format.go new file mode 100644 index 0000000000..808d8ce18b --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/format/format.go @@ -0,0 +1,703 @@ +// Copyright (c) 2019, Daniel Martí +// See LICENSE for licensing information + +// Package format exposes gofumpt's formatting in an API similar to go/format. +// In general, the APIs are only guaranteed to work well when the input source +// is in canonical gofmt format. +package format + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp" + "golang.org/x/mod/semver" + "golang.org/x/tools/go/ast/astutil" +) + +type Options struct { + // LangVersion corresponds to the Go language version a piece of code is + // written in. The version is used to decide whether to apply formatting + // rules which require new language features. When inside a Go module, + // LangVersion should generally be specified as the result of: + // + // go list -m -f {{.GoVersion}} + // + // LangVersion is treated as a semantic version, which might start with + // a "v" prefix. Like Go versions, it might also be incomplete; "1.14" + // is equivalent to "1.14.0". When empty, it is equivalent to "v1", to + // not use language features which could break programs. + LangVersion string + + ExtraRules bool +} + +// Source formats src in gofumpt's format, assuming that src holds a valid Go +// source file. +func Source(src []byte, opts Options) ([]byte, error) { + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "", src, parser.ParseComments) + if err != nil { + return nil, err + } + + File(fset, file, opts) + + var buf bytes.Buffer + if err := format.Node(&buf, fset, file); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// File modifies a file and fset in place to follow gofumpt's format. The +// changes might include manipulating adding or removing newlines in fset, +// modifying the position of nodes, or modifying literal values. 
+func File(fset *token.FileSet, file *ast.File, opts Options) { + if opts.LangVersion == "" { + opts.LangVersion = "v1" + } else if opts.LangVersion[0] != 'v' { + opts.LangVersion = "v" + opts.LangVersion + } + if !semver.IsValid(opts.LangVersion) { + panic(fmt.Sprintf("invalid semver string: %q", opts.LangVersion)) + } + f := &fumpter{ + File: fset.File(file.Pos()), + fset: fset, + astFile: file, + Options: opts, + } + pre := func(c *astutil.Cursor) bool { + f.applyPre(c) + if _, ok := c.Node().(*ast.BlockStmt); ok { + f.blockLevel++ + } + return true + } + post := func(c *astutil.Cursor) bool { + if _, ok := c.Node().(*ast.BlockStmt); ok { + f.blockLevel-- + } + return true + } + astutil.Apply(file, pre, post) +} + +// Multiline nodes which could fit on a single line under this many +// bytes may be collapsed onto a single line. +const shortLineLimit = 60 + +var rxOctalInteger = regexp.MustCompile(`\A0[0-7_]+\z`) + +type fumpter struct { + Options + + *token.File + fset *token.FileSet + + astFile *ast.File + + blockLevel int +} + +func (f *fumpter) commentsBetween(p1, p2 token.Pos) []*ast.CommentGroup { + comments := f.astFile.Comments + i1 := sort.Search(len(comments), func(i int) bool { + return comments[i].Pos() >= p1 + }) + comments = comments[i1:] + i2 := sort.Search(len(comments), func(i int) bool { + return comments[i].Pos() >= p2 + }) + comments = comments[:i2] + return comments +} + +func (f *fumpter) inlineComment(pos token.Pos) *ast.Comment { + comments := f.astFile.Comments + i := sort.Search(len(comments), func(i int) bool { + return comments[i].Pos() >= pos + }) + if i >= len(comments) { + return nil + } + line := f.Line(pos) + for _, comment := range comments[i].List { + if f.Line(comment.Pos()) == line { + return comment + } + } + return nil +} + +// addNewline is a hack to let us force a newline at a certain position. +func (f *fumpter) addNewline(at token.Pos) { + offset := f.Offset(at) + + field := reflect.ValueOf(f.File).Elem().FieldByName("lines") + n := field.Len() + lines := make([]int, 0, n+1) + for i := 0; i < n; i++ { + cur := int(field.Index(i).Int()) + if offset == cur { + // This newline already exists; do nothing. Duplicate + // newlines can't exist. + return + } + if offset >= 0 && offset < cur { + lines = append(lines, offset) + offset = -1 + } + lines = append(lines, cur) + } + if offset >= 0 { + lines = append(lines, offset) + } + if !f.SetLines(lines) { + panic(fmt.Sprintf("could not set lines to %v", lines)) + } +} + +// removeLines removes all newlines between two positions, so that they end +// up on the same line. +func (f *fumpter) removeLines(fromLine, toLine int) { + for fromLine < toLine { + f.MergeLine(fromLine) + toLine-- + } +} + +// removeLinesBetween is like removeLines, but it leaves one newline between the +// two positions. +func (f *fumpter) removeLinesBetween(from, to token.Pos) { + f.removeLines(f.Line(from)+1, f.Line(to)) +} + +type byteCounter int + +func (b *byteCounter) Write(p []byte) (n int, err error) { + *b += byteCounter(len(p)) + return len(p), nil +} + +func (f *fumpter) printLength(node ast.Node) int { + var count byteCounter + if err := format.Node(&count, f.fset, node); err != nil { + panic(fmt.Sprintf("unexpected print error: %v", err)) + } + + // Add the space taken by an inline comment. + if c := f.inlineComment(node.End()); c != nil { + fmt.Fprintf(&count, " %s", c.Text) + } + + // Add an approximation of the indentation level. We can't know the + // number of tabs go/printer will add ahead of time. 
Trying to print the + // entire top-level declaration would tell us that, but then it's near + // impossible to reliably find our node again. + return int(count) + (f.blockLevel * 8) +} + +// rxCommentDirective covers all common Go comment directives: +// +// //go: | standard Go directives, like go:noinline +// //some-words: | similar to the syntax above, like lint:ignore or go-sumtype:decl +// //line | inserted line information for cmd/compile +// //export | to mark cgo funcs for exporting +// //extern | C function declarations for gccgo +// //sys(nb)? | syscall function wrapper prototypes +// //nolint | nolint directive for golangci +// +// Note that the "some-words:" matching expects a letter afterward, such as +// "go:generate", to prevent matching false positives like "https://site". +var rxCommentDirective = regexp.MustCompile(`^([a-z-]+:[a-z]+|line\b|export\b|extern\b|sys(nb)?\b|nolint\b)`) + +// visit takes either an ast.Node or a []ast.Stmt. +func (f *fumpter) applyPre(c *astutil.Cursor) { + switch node := c.Node().(type) { + case *ast.File: + var lastMulti bool + var lastEnd token.Pos + for _, decl := range node.Decls { + pos := decl.Pos() + comments := f.commentsBetween(lastEnd, pos) + if len(comments) > 0 { + pos = comments[0].Pos() + } + + // multiline top-level declarations should be separated + multi := f.Line(pos) < f.Line(decl.End()) + if multi && lastMulti && f.Line(lastEnd)+1 == f.Line(pos) { + f.addNewline(lastEnd) + } + + lastMulti = multi + lastEnd = decl.End() + } + + // Join contiguous lone var/const/import lines; abort if there + // are empty lines or comments in between. + newDecls := make([]ast.Decl, 0, len(node.Decls)) + for i := 0; i < len(node.Decls); { + newDecls = append(newDecls, node.Decls[i]) + start, ok := node.Decls[i].(*ast.GenDecl) + if !ok || isCgoImport(start) { + i++ + continue + } + lastPos := start.Pos() + for i++; i < len(node.Decls); { + cont, ok := node.Decls[i].(*ast.GenDecl) + if !ok || cont.Tok != start.Tok || cont.Lparen != token.NoPos || + f.Line(lastPos) < f.Line(cont.Pos())-1 || isCgoImport(cont) { + break + } + start.Specs = append(start.Specs, cont.Specs...) + if c := f.inlineComment(cont.End()); c != nil { + // don't move an inline comment outside + start.Rparen = c.End() + } + lastPos = cont.Pos() + i++ + } + } + node.Decls = newDecls + + // Comments aren't nodes, so they're not walked by default. + groupLoop: + for _, group := range node.Comments { + for _, comment := range group.List { + body := strings.TrimPrefix(comment.Text, "//") + if body == comment.Text { + // /*-style comment + continue groupLoop + } + if rxCommentDirective.MatchString(body) { + // this line is a directive + continue groupLoop + } + r, _ := utf8.DecodeRuneInString(body) + if !unicode.IsLetter(r) && !unicode.IsNumber(r) && !unicode.IsSpace(r) { + // this line could be code like "//{" + continue groupLoop + } + } + // If none of the comment group's lines look like a + // directive or code, add spaces, if needed. + for _, comment := range group.List { + body := strings.TrimPrefix(comment.Text, "//") + r, _ := utf8.DecodeRuneInString(body) + if !unicode.IsSpace(r) { + comment.Text = "// " + strings.TrimPrefix(comment.Text, "//") + } + } + } + + case *ast.DeclStmt: + decl, ok := node.Decl.(*ast.GenDecl) + if !ok || decl.Tok != token.VAR || len(decl.Specs) != 1 { + break // e.g. const name = "value" + } + spec := decl.Specs[0].(*ast.ValueSpec) + if spec.Type != nil { + break // e.g. 
var name Type + } + tok := token.ASSIGN + names := make([]ast.Expr, len(spec.Names)) + for i, name := range spec.Names { + names[i] = name + if name.Name != "_" { + tok = token.DEFINE + } + } + c.Replace(&ast.AssignStmt{ + Lhs: names, + Tok: tok, + Rhs: spec.Values, + }) + + case *ast.GenDecl: + if node.Tok == token.IMPORT && node.Lparen.IsValid() { + f.joinStdImports(node) + } + + // Single var declarations shouldn't use parentheses, unless + // there's a comment on the grouped declaration. + if node.Tok == token.VAR && len(node.Specs) == 1 && + node.Lparen.IsValid() && node.Doc == nil { + specPos := node.Specs[0].Pos() + specEnd := node.Specs[0].End() + + if len(f.commentsBetween(node.TokPos, specPos)) > 0 { + // If the single spec has any comment, it must + // go before the entire declaration now. + node.TokPos = specPos + } else { + f.removeLines(f.Line(node.TokPos), f.Line(specPos)) + } + f.removeLines(f.Line(specEnd), f.Line(node.Rparen)) + + // Remove the parentheses. go/printer will automatically + // get rid of the newlines. + node.Lparen = token.NoPos + node.Rparen = token.NoPos + } + + case *ast.BlockStmt: + f.stmts(node.List) + comments := f.commentsBetween(node.Lbrace, node.Rbrace) + if len(node.List) == 0 && len(comments) == 0 { + f.removeLinesBetween(node.Lbrace, node.Rbrace) + break + } + + var sign *ast.FuncType + var cond ast.Expr + switch parent := c.Parent().(type) { + case *ast.FuncDecl: + sign = parent.Type + case *ast.FuncLit: + sign = parent.Type + case *ast.IfStmt: + cond = parent.Cond + case *ast.ForStmt: + cond = parent.Cond + } + + if len(node.List) > 1 && sign == nil { + // only if we have a single statement, or if + // it's a func body. + break + } + var bodyPos, bodyEnd token.Pos + + if len(node.List) > 0 { + bodyPos = node.List[0].Pos() + bodyEnd = node.List[len(node.List)-1].End() + } + if len(comments) > 0 { + if pos := comments[0].Pos(); !bodyPos.IsValid() || pos < bodyPos { + bodyPos = pos + } + if pos := comments[len(comments)-1].End(); !bodyPos.IsValid() || pos > bodyEnd { + bodyEnd = pos + } + } + + f.removeLinesBetween(bodyEnd, node.Rbrace) + + if cond != nil && f.Line(cond.Pos()) != f.Line(cond.End()) { + // The body is preceded by a multi-line condition, so an + // empty line can help readability. + return + } + if sign != nil { + var lastParam *ast.Field + if l := sign.Results; l != nil && len(l.List) > 0 { + lastParam = l.List[len(l.List)-1] + } else if l := sign.Params; l != nil && len(l.List) > 0 { + lastParam = l.List[len(l.List)-1] + } + endLine := f.Line(sign.End()) + if lastParam != nil && f.Line(sign.Pos()) != endLine && f.Line(lastParam.Pos()) == endLine { + // The body is preceded by a multi-line function + // signature, and the empty line helps readability. 
+ return + } + } + + f.removeLinesBetween(node.Lbrace, bodyPos) + + case *ast.CompositeLit: + if len(node.Elts) == 0 { + // doesn't have elements + break + } + openLine := f.Line(node.Lbrace) + closeLine := f.Line(node.Rbrace) + if openLine == closeLine { + // all in a single line + break + } + + newlineAroundElems := false + newlineBetweenElems := false + lastLine := openLine + for i, elem := range node.Elts { + if f.Line(elem.Pos()) > lastLine { + if i == 0 { + newlineAroundElems = true + } else { + newlineBetweenElems = true + } + } + lastLine = f.Line(elem.End()) + } + if closeLine > lastLine { + newlineAroundElems = true + } + + if newlineBetweenElems || newlineAroundElems { + first := node.Elts[0] + if openLine == f.Line(first.Pos()) { + // We want the newline right after the brace. + f.addNewline(node.Lbrace + 1) + closeLine = f.Line(node.Rbrace) + } + last := node.Elts[len(node.Elts)-1] + if closeLine == f.Line(last.End()) { + // We want the newline right before the brace. + f.addNewline(node.Rbrace) + } + } + + // If there's a newline between any consecutive elements, there + // must be a newline between all composite literal elements. + if !newlineBetweenElems { + break + } + for i1, elem1 := range node.Elts { + i2 := i1 + 1 + if i2 >= len(node.Elts) { + break + } + elem2 := node.Elts[i2] + // TODO: do we care about &{}? + _, ok1 := elem1.(*ast.CompositeLit) + _, ok2 := elem2.(*ast.CompositeLit) + if !ok1 && !ok2 { + continue + } + if f.Line(elem1.End()) == f.Line(elem2.Pos()) { + f.addNewline(elem1.End()) + } + } + + case *ast.CaseClause: + f.stmts(node.Body) + openLine := f.Line(node.Case) + closeLine := f.Line(node.Colon) + if openLine == closeLine { + // nothing to do + break + } + if len(f.commentsBetween(node.Case, node.Colon)) > 0 { + // don't move comments + break + } + if f.printLength(node) > shortLineLimit { + // too long to collapse + break + } + f.removeLines(openLine, closeLine) + + case *ast.CommClause: + f.stmts(node.Body) + + case *ast.FieldList: + if node.NumFields() == 0 && f.inlineComment(node.Pos()) == nil { + // Empty field lists should not contain a newline. + // Do not join the two lines if the first has an inline + // comment, as that can result in broken formatting. + openLine := f.Line(node.Pos()) + closeLine := f.Line(node.End()) + f.removeLines(openLine, closeLine) + } + + // Merging adjacent fields (e.g. parameters) is disabled by default. + if !f.ExtraRules { + break + } + switch c.Parent().(type) { + case *ast.FuncDecl, *ast.FuncType, *ast.InterfaceType: + node.List = f.mergeAdjacentFields(node.List) + c.Replace(node) + case *ast.StructType: + // Do not merge adjacent fields in structs. + } + + case *ast.BasicLit: + // Octal number literals were introduced in 1.13. + if semver.Compare(f.LangVersion, "v1.13") >= 0 { + if node.Kind == token.INT && rxOctalInteger.MatchString(node.Value) { + node.Value = "0o" + node.Value[1:] + c.Replace(node) + } + } + } +} + +func (f *fumpter) stmts(list []ast.Stmt) { + for i, stmt := range list { + ifs, ok := stmt.(*ast.IfStmt) + if !ok || i < 1 { + continue // not an if following another statement + } + as, ok := list[i-1].(*ast.AssignStmt) + if !ok || as.Tok != token.DEFINE || + !identEqual(as.Lhs[len(as.Lhs)-1], "err") { + continue // not "..., err := ..." 
+ } + be, ok := ifs.Cond.(*ast.BinaryExpr) + if !ok || ifs.Init != nil || ifs.Else != nil { + continue // complex if + } + if be.Op != token.NEQ || !identEqual(be.X, "err") || + !identEqual(be.Y, "nil") { + continue // not "err != nil" + } + f.removeLinesBetween(as.End(), ifs.Pos()) + } +} + +func identEqual(expr ast.Expr, name string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == name +} + +// isCgoImport returns true if the declaration is simply: +// +// import "C" +// +// Note that parentheses do not affect the result. +func isCgoImport(decl *ast.GenDecl) bool { + if decl.Tok != token.IMPORT || len(decl.Specs) != 1 { + return false + } + spec := decl.Specs[0].(*ast.ImportSpec) + return spec.Path.Value == `"C"` +} + +// joinStdImports ensures that all standard library imports are together and at +// the top of the imports list. +func (f *fumpter) joinStdImports(d *ast.GenDecl) { + var std, other []ast.Spec + firstGroup := true + lastEnd := d.Pos() + needsSort := false + for i, spec := range d.Specs { + spec := spec.(*ast.ImportSpec) + if coms := f.commentsBetween(lastEnd, spec.Pos()); len(coms) > 0 { + lastEnd = coms[len(coms)-1].End() + } + if i > 0 && firstGroup && f.Line(spec.Pos()) > f.Line(lastEnd)+1 { + firstGroup = false + } else { + // We're still in the first group, update lastEnd. + lastEnd = spec.End() + } + + path, _ := strconv.Unquote(spec.Path.Value) + switch { + // Imports with a period are definitely third party. + case strings.Contains(path, "."): + fallthrough + // "test" and "example" are reserved as per golang.org/issue/37641. + // "internal" is unreachable. + case strings.HasPrefix(path, "test/") || + strings.HasPrefix(path, "example/") || + strings.HasPrefix(path, "internal/"): + fallthrough + // To be conservative, if an import has a name or an inline + // comment, and isn't part of the top group, treat it as non-std. + case !firstGroup && (spec.Name != nil || spec.Comment != nil): + other = append(other, spec) + continue + } + + // If we're moving this std import further up, reset its + // position, to avoid breaking comments. + if !firstGroup || len(other) > 0 { + setPos(reflect.ValueOf(spec), d.Pos()) + needsSort = true + } + std = append(std, spec) + } + // Ensure there is an empty line between std imports and other imports. + if len(std) > 0 && len(other) > 0 && f.Line(std[len(std)-1].End())+1 >= f.Line(other[0].Pos()) { + // We add two newlines, as that's necessary in some edge cases. + // For example, if the std and non-std imports were together and + // without indentation, adding one newline isn't enough. Two + // empty lines will be printed as one by go/printer, anyway. + f.addNewline(other[0].Pos() - 1) + f.addNewline(other[0].Pos()) + } + // Finally, join the imports, keeping std at the top. + d.Specs = append(std, other...) + + // If we moved any std imports to the first group, we need to sort them + // again. + if needsSort { + ast.SortImports(f.fset, f.astFile) + } +} + +// mergeAdjacentFields returns fields with adjacent fields merged if possible. +func (f *fumpter) mergeAdjacentFields(fields []*ast.Field) []*ast.Field { + // If there are less than two fields then there is nothing to merge. + if len(fields) < 2 { + return fields + } + + // Otherwise, iterate over adjacent pairs of fields, merging if possible, + // and mutating fields. Elements of fields may be mutated (if merged with + // following fields), discarded (if merged with a preceeding field), or left + // unchanged. 
+ i := 0 + for j := 1; j < len(fields); j++ { + if f.shouldMergeAdjacentFields(fields[i], fields[j]) { + fields[i].Names = append(fields[i].Names, fields[j].Names...) + } else { + i++ + fields[i] = fields[j] + } + } + return fields[:i+1] +} + +func (f *fumpter) shouldMergeAdjacentFields(f1, f2 *ast.Field) bool { + if len(f1.Names) == 0 || len(f2.Names) == 0 { + // Both must have names for the merge to work. + return false + } + if f.Line(f1.Pos()) != f.Line(f2.Pos()) { + // Trust the user if they used separate lines. + return false + } + + // Only merge if the types are equal. + opt := cmp.Comparer(func(x, y token.Pos) bool { return true }) + return cmp.Equal(f1.Type, f2.Type, opt) +} + +var posType = reflect.TypeOf(token.NoPos) + +// setPos recursively sets all position fields in the node v to pos. +func setPos(v reflect.Value, pos token.Pos) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if !v.IsValid() { + return + } + if v.Type() == posType { + v.Set(reflect.ValueOf(pos)) + } + if v.Kind() == reflect.Struct { + for i := 0; i < v.NumField(); i++ { + setPos(v.Field(i), pos) + } + } +} diff --git a/vendor/mvdan.cc/gofumpt/go.mod b/vendor/mvdan.cc/gofumpt/go.mod new file mode 100644 index 0000000000..051ff53c13 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/go.mod @@ -0,0 +1,11 @@ +module mvdan.cc/gofumpt + +go 1.14 + +require ( + github.com/google/go-cmp v0.5.4 + github.com/rogpeppe/go-internal v1.6.2 + golang.org/x/mod v0.4.0 + golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect +) diff --git a/vendor/mvdan.cc/gofumpt/go.sum b/vendor/mvdan.cc/gofumpt/go.sum new file mode 100644 index 0000000000..e88027cd04 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/go.sum @@ -0,0 +1,40 @@ +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c h1:dS09fXwOFF9cXBnIzZexIuUBj95U1NyQjkEhkgidDow= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/vendor/mvdan.cc/gofumpt/gofmt.go b/vendor/mvdan.cc/gofumpt/gofmt.go new file mode 100644 index 0000000000..c040e775af --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/gofmt.go @@ -0,0 +1,323 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/scanner" + "go/token" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "runtime/pprof" + "strings" + + gformat "mvdan.cc/gofumpt/format" + "mvdan.cc/gofumpt/internal/diff" +) + +var ( + // main operation modes + list = flag.Bool("l", false, "list files whose formatting differs from gofumpt's") + write = flag.Bool("w", false, "write result to (source) file instead of stdout") + rewriteRule = flag.String("r", "", "rewrite rule (e.g., 'a[b:len(a)] -> a[b:]')") + simplifyAST = flag.Bool("s", false, "simplify code") + doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") + allErrors = flag.Bool("e", false, "report all errors (not just the first 10 on different lines)") + + // debugging + cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") +) + +// Keep these in sync with go/format/format.go. 
+const ( + tabWidth = 8 + printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers + + // printerNormalizeNumbers means to canonicalize number literal prefixes + // and exponents while printing. See https://golang.org/doc/go1.13#gofumpt. + // + // This value is defined in go/printer specifically for go/format and cmd/gofumpt. + printerNormalizeNumbers = 1 << 30 +) + +var ( + fileSet = token.NewFileSet() // per process FileSet + exitCode = 0 + rewrite func(*ast.File) *ast.File + parserMode parser.Mode +) + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: gofumpt [flags] [path ...]\n") + flag.PrintDefaults() +} + +func initParserMode() { + parserMode = parser.ParseComments + if *allErrors { + parserMode |= parser.AllErrors + } +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +// If in == nil, the source is the contents of the file with the given filename. +func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error { + var perm os.FileMode = 0o644 + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return err + } + in = f + perm = fi.Mode().Perm() + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + file, sourceAdj, indentAdj, err := parse(fileSet, filename, src, stdin) + if err != nil { + return err + } + + if rewrite != nil { + if sourceAdj == nil { + file = rewrite(file) + } else { + fmt.Fprintf(os.Stderr, "warning: rewrite ignored for incomplete programs\n") + } + } + + ast.SortImports(fileSet, file) + + if *simplifyAST { + simplify(file) + } + + // Apply gofumpt's changes before we print the code in gofumpt's + // format. + if *langVersion == "" { + out, err := exec.Command("go", "list", "-m", "-f", "{{.GoVersion}}").Output() + out = bytes.TrimSpace(out) + if err == nil && len(out) > 0 { + *langVersion = string(out) + } + } + gformat.File(fileSet, file, gformat.Options{ + LangVersion: *langVersion, + ExtraRules: *extraRules, + }) + + res, err := format(fileSet, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) + if err != nil { + return err + } + + if !bytes.Equal(src, res) { + // formatting has changed + if *list { + fmt.Fprintln(out, filename) + } + if *write { + // make a temporary backup before overwriting original + bakname, err := backupFile(filename+".", src, perm) + if err != nil { + return err + } + err = ioutil.WriteFile(filename, res, perm) + if err != nil { + os.Rename(bakname, filename) + return err + } + err = os.Remove(bakname) + if err != nil { + return err + } + } + if *doDiff { + data, err := diffWithReplaceTempFile(src, res, filename) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) + out.Write(data) + } + } + + if !*list && !*write && !*doDiff { + _, err = out.Write(res) + } + + return err +} + +func visitFile(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, nil, os.Stdout, false) + } + // Don't complain if a file was deleted in the meantime (i.e. + // the directory changed concurrently while running gofumpt). 
+ if err != nil && !os.IsNotExist(err) { + report(err) + } + return nil +} + +func walkDir(path string) { + filepath.Walk(path, visitFile) +} + +func main() { + // call gofumptMain in a separate function + // so that it can use defer and have them + // run before the exit. + gofumptMain() + os.Exit(exitCode) +} + +func gofumptMain() { + flag.Usage = usage + flag.Parse() + + // Print the gofumpt version if the user asks for it. + if *showVersion { + printVersion() + return + } + + if *cpuprofile != "" { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Fprintf(os.Stderr, "creating cpu profile: %s\n", err) + exitCode = 2 + return + } + defer f.Close() + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + initParserMode() + initRewrite() + + if flag.NArg() == 0 { + if *write { + fmt.Fprintln(os.Stderr, "error: cannot use -w with standard input") + exitCode = 2 + return + } + if err := processFile("", os.Stdin, os.Stdout, true); err != nil { + report(err) + } + return + } + + for i := 0; i < flag.NArg(); i++ { + path := flag.Arg(i) + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, nil, os.Stdout, false); err != nil { + report(err) + } + } + } +} + +func diffWithReplaceTempFile(b1, b2 []byte, filename string) ([]byte, error) { + data, err := diff.Diff("gofumpt", b1, b2) + if len(data) > 0 { + return replaceTempFilename(data, filename) + } + return data, err +} + +// replaceTempFilename replaces temporary filenames in diff with actual one. +// +// --- /tmp/gofumpt316145376 2017-02-03 19:13:00.280468375 -0500 +// +++ /tmp/gofumpt617882815 2017-02-03 19:13:00.280468375 -0500 +// ... +// -> +// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 +// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 +// ... +func replaceTempFilename(diff []byte, filename string) ([]byte, error) { + bs := bytes.SplitN(diff, []byte{'\n'}, 3) + if len(bs) < 3 { + return nil, fmt.Errorf("got unexpected diff for %s", filename) + } + // Preserve timestamps. + var t0, t1 []byte + if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { + t0 = bs[0][i:] + } + if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { + t1 = bs[1][i:] + } + // Always print filepath with slash separator. + f := filepath.ToSlash(filename) + bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) + bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) + return bytes.Join(bs, []byte{'\n'}), nil +} + +const chmodSupported = runtime.GOOS != "windows" + +// backupFile writes data to a new file named filename with permissions perm, +// with 0 && isSpace(src[i-1]) { + i-- + } + return append(res, src[i:]...), nil +} + +// isSpace reports whether the byte is a space character. +// isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'. +func isSpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} diff --git a/vendor/mvdan.cc/gofumpt/internal/diff/diff.go b/vendor/mvdan.cc/gofumpt/internal/diff/diff.go new file mode 100644 index 0000000000..e9d2c23780 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/diff/diff.go @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff implements a Diff function that compare two inputs +// using the 'diff' tool. 
+package diff + +import ( + "io/ioutil" + "os" + "os/exec" + "runtime" +) + +// Returns diff of two arrays of bytes in diff tool format. +func Diff(prefix string, b1, b2 []byte) ([]byte, error) { + f1, err := writeTempFile(prefix, b1) + if err != nil { + return nil, err + } + defer os.Remove(f1) + + f2, err := writeTempFile(prefix, b2) + if err != nil { + return nil, err + } + defer os.Remove(f2) + + cmd := "diff" + if runtime.GOOS == "plan9" { + cmd = "/bin/ape/diff" + } + + data, err := exec.Command(cmd, "-u", f1, f2).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + err = nil + } + return data, err +} + +func writeTempFile(prefix string, data []byte) (string, error) { + file, err := ioutil.TempFile("", prefix) + if err != nil { + return "", err + } + _, err = file.Write(data) + if err1 := file.Close(); err == nil { + err = err1 + } + if err != nil { + os.Remove(file.Name()) + return "", err + } + return file.Name(), nil +} diff --git a/vendor/mvdan.cc/gofumpt/rewrite.go b/vendor/mvdan.cc/gofumpt/rewrite.go new file mode 100644 index 0000000000..bab22e04cd --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/rewrite.go @@ -0,0 +1,309 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +func initRewrite() { + if *rewriteRule == "" { + rewrite = nil // disable any previous rewrite + return + } + f := strings.Split(*rewriteRule, "->") + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") + os.Exit(2) + } + pattern := parseExpr(f[0], "pattern") + replace := parseExpr(f[1], "replacement") + rewrite = func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +} + +// parseExpr parses s as an expression. +// It might make sense to expand this to allow statement patterns, +// but there are problems with preserving formatting and also +// with what a wildcard for a statement looks like. +func parseExpr(s, what string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) + os.Exit(2) + } + return x +} + +// Keep this function for debugging. +/* +func dump(msg string, val reflect.Value) { + fmt.Printf("%s:\n", msg) + ast.Print(fileSet, val.Interface()) + fmt.Println() +} +*/ + +// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. 
+func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { + cmap := ast.NewCommentMap(fileSet, p, p.Comments) + m := make(map[string]reflect.Value) + pat := reflect.ValueOf(pattern) + repl := reflect.ValueOf(replace) + + var rewriteVal func(val reflect.Value) reflect.Value + rewriteVal = func(val reflect.Value) reflect.Value { + // don't bother if val is invalid to start with + if !val.IsValid() { + return reflect.Value{} + } + val = apply(rewriteVal, val) + for k := range m { + delete(m, k) + } + if match(m, pat, val) { + val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) + } + return val + } + + r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) + r.Comments = cmap.Filter(r).Comments() // recreate comments list + return r +} + +// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +func set(x, y reflect.Value) { + // don't bother if x cannot be set or y is invalid + if !x.CanSet() || !y.IsValid() { + return + } + defer func() { + if x := recover(); x != nil { + if s, ok := x.(string); ok && + (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { + // x cannot be set to y - ignore this rewrite + return + } + panic(x) + } + }() + x.Set(y) +} + +// Values/types for special cases. +var ( + objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) + scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) + + identType = reflect.TypeOf((*ast.Ident)(nil)) + objectPtrType = reflect.TypeOf((*ast.Object)(nil)) + positionType = reflect.TypeOf(token.NoPos) + callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) + scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +) + +// apply replaces each AST field x in val with f(x), returning val. +// To avoid extra conversions, f operates on the reflect.Value form. +func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { + if !val.IsValid() { + return reflect.Value{} + } + + // *ast.Objects introduce cycles and are likely incorrect after + // rewrite; don't follow them but replace with nil instead + if val.Type() == objectPtrType { + return objectPtrNil + } + + // similarly for scopes: they are likely incorrect after a rewrite; + // replace them with nil + if val.Type() == scopePtrType { + return scopePtrNil + } + + switch v := reflect.Indirect(val); v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + e := v.Index(i) + set(e, f(e)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + e := v.Field(i) + set(e, f(e)) + } + case reflect.Interface: + e := v.Elem() + set(v, f(e)) + } + return val +} + +func isWildcard(s string) bool { + rune, size := utf8.DecodeRuneInString(s) + return size == len(s) && unicode.IsLower(rune) +} + +// match reports whether pattern matches val, +// recording wildcard submatches in m. +// If m == nil, match checks whether pattern == val. +func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { + // Wildcard matches any expression. If it appears multiple + // times in the pattern, it must match the same expression + // each time. + if m != nil && pattern.IsValid() && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) && val.IsValid() { + // wildcards only match valid (non-nil) expressions. + if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { + if old, ok := m[name]; ok { + return match(nil, old, val) + } + m[name] = val + return true + } + } + } + + // Otherwise, pattern and val must match recursively. 
+ if !pattern.IsValid() || !val.IsValid() { + return !pattern.IsValid() && !val.IsValid() + } + if pattern.Type() != val.Type() { + return false + } + + // Special cases. + switch pattern.Type() { + case identType: + // For identifiers, only the names need to match + // (and none of the other *ast.Object information). + // This is a common case, handle it all here instead + // of recursing down any further via reflection. + p := pattern.Interface().(*ast.Ident) + v := val.Interface().(*ast.Ident) + return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name + case objectPtrType, positionType: + // object pointers and token positions always match + return true + case callExprType: + // For calls, the Ellipsis fields (token.Position) must + // match since that is how f(x) and f(x...) are different. + // Check them here but fall through for the remaining fields. + p := pattern.Interface().(*ast.CallExpr) + v := val.Interface().(*ast.CallExpr) + if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { + return false + } + } + + p := reflect.Indirect(pattern) + v := reflect.Indirect(val) + if !p.IsValid() || !v.IsValid() { + return !p.IsValid() && !v.IsValid() + } + + switch p.Kind() { + case reflect.Slice: + if p.Len() != v.Len() { + return false + } + for i := 0; i < p.Len(); i++ { + if !match(m, p.Index(i), v.Index(i)) { + return false + } + } + return true + + case reflect.Struct: + for i := 0; i < p.NumField(); i++ { + if !match(m, p.Field(i), v.Field(i)) { + return false + } + } + return true + + case reflect.Interface: + return match(m, p.Elem(), v.Elem()) + } + + // Handle token integers, etc. + return p.Interface() == v.Interface() +} + +// subst returns a copy of pattern with values from m substituted in place +// of wildcards and pos used as the position of tokens from the pattern. +// if m == nil, subst returns a copy of pattern and doesn't change the line +// number information. +func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { + if !pattern.IsValid() { + return reflect.Value{} + } + + // Wildcard gets replaced with map value. + if m != nil && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) { + if old, ok := m[name]; ok { + return subst(nil, old, reflect.Value{}) + } + } + } + + if pos.IsValid() && pattern.Type() == positionType { + // use new position only if old position was valid in the first place + if old := pattern.Interface().(token.Pos); !old.IsValid() { + return pattern + } + return pos + } + + // Otherwise copy. + switch p := pattern; p.Kind() { + case reflect.Slice: + if p.IsNil() { + // Do not turn nil slices into empty slices. go/ast + // guarantees that certain lists will be nil if not + // populated. 
+ return reflect.Zero(p.Type()) + } + v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) + for i := 0; i < p.Len(); i++ { + v.Index(i).Set(subst(m, p.Index(i), pos)) + } + return v + + case reflect.Struct: + v := reflect.New(p.Type()).Elem() + for i := 0; i < p.NumField(); i++ { + v.Field(i).Set(subst(m, p.Field(i), pos)) + } + return v + + case reflect.Ptr: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos).Addr()) + } + return v + + case reflect.Interface: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos)) + } + return v + } + + return pattern +} diff --git a/vendor/mvdan.cc/gofumpt/simplify.go b/vendor/mvdan.cc/gofumpt/simplify.go new file mode 100644 index 0000000000..1a0e8174af --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/simplify.go @@ -0,0 +1,165 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" + "go/token" + "reflect" +) + +type simplifier struct{} + +func (s simplifier) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.CompositeLit: + // array, slice, and map composite literals may be simplified + outer := n + var keyType, eltType ast.Expr + switch typ := outer.Type.(type) { + case *ast.ArrayType: + eltType = typ.Elt + case *ast.MapType: + keyType = typ.Key + eltType = typ.Value + } + + if eltType != nil { + var ktyp reflect.Value + if keyType != nil { + ktyp = reflect.ValueOf(keyType) + } + typ := reflect.ValueOf(eltType) + for i, x := range outer.Elts { + px := &outer.Elts[i] + // look at value of indexed/named elements + if t, ok := x.(*ast.KeyValueExpr); ok { + if keyType != nil { + s.simplifyLiteral(ktyp, keyType, t.Key, &t.Key) + } + x = t.Value + px = &t.Value + } + s.simplifyLiteral(typ, eltType, x, px) + } + // node was simplified - stop walk (there are no subnodes to simplify) + return nil + } + + case *ast.SliceExpr: + // a slice expression of the form: s[a:len(s)] + // can be simplified to: s[a:] + // if s is "simple enough" (for now we only accept identifiers) + // + // Note: This may not be correct because len may have been redeclared in another + // file belonging to the same package. However, this is extremely unlikely + // and so far (April 2016, after years of supporting this rewrite feature) + // has never come up, so let's keep it working as is (see also #15153). + if n.Max != nil { + // - 3-index slices always require the 2nd and 3rd index + break + } + if s, _ := n.X.(*ast.Ident); s != nil && s.Obj != nil { + // the array/slice object is a single, resolved identifier + if call, _ := n.High.(*ast.CallExpr); call != nil && len(call.Args) == 1 && !call.Ellipsis.IsValid() { + // the high expression is a function call with a single argument + if fun, _ := call.Fun.(*ast.Ident); fun != nil && fun.Name == "len" && fun.Obj == nil { + // the function called is "len" and it is not locally defined; and + // because we don't have dot imports, it must be the predefined len() + if arg, _ := call.Args[0].(*ast.Ident); arg != nil && arg.Obj == s.Obj { + // the len argument is the array/slice object + n.High = nil + } + } + } + } + // Note: We could also simplify slice expressions of the form s[0:b] to s[:b] + // but we leave them as is since sometimes we want to be very explicit + // about the lower bound. 
+ // An example where the 0 helps: + // x, y, z := b[0:2], b[2:4], b[4:6] + // An example where it does not: + // x, y := b[:n], b[n:] + + case *ast.RangeStmt: + // - a range of the form: for x, _ = range v {...} + // can be simplified to: for x = range v {...} + // - a range of the form: for _ = range v {...} + // can be simplified to: for range v {...} + if isBlank(n.Value) { + n.Value = nil + } + if isBlank(n.Key) && n.Value == nil { + n.Key = nil + } + } + + return s +} + +func (s simplifier) simplifyLiteral(typ reflect.Value, astType, x ast.Expr, px *ast.Expr) { + ast.Walk(s, x) // simplify x + + // if the element is a composite literal and its literal type + // matches the outer literal's element type exactly, the inner + // literal type may be omitted + if inner, ok := x.(*ast.CompositeLit); ok { + if match(nil, typ, reflect.ValueOf(inner.Type)) { + inner.Type = nil + } + } + // if the outer literal's element type is a pointer type *T + // and the element is & of a composite literal of type T, + // the inner &T may be omitted. + if ptr, ok := astType.(*ast.StarExpr); ok { + if addr, ok := x.(*ast.UnaryExpr); ok && addr.Op == token.AND { + if inner, ok := addr.X.(*ast.CompositeLit); ok { + if match(nil, reflect.ValueOf(ptr.X), reflect.ValueOf(inner.Type)) { + inner.Type = nil // drop T + *px = inner // drop & + } + } + } + } +} + +func isBlank(x ast.Expr) bool { + ident, ok := x.(*ast.Ident) + return ok && ident.Name == "_" +} + +func simplify(f *ast.File) { + // remove empty declarations such as "const ()", etc + removeEmptyDeclGroups(f) + + var s simplifier + ast.Walk(s, f) +} + +func removeEmptyDeclGroups(f *ast.File) { + i := 0 + for _, d := range f.Decls { + if g, ok := d.(*ast.GenDecl); !ok || !isEmpty(f, g) { + f.Decls[i] = d + i++ + } + } + f.Decls = f.Decls[:i] +} + +func isEmpty(f *ast.File, g *ast.GenDecl) bool { + if g.Doc != nil || g.Specs != nil { + return false + } + + for _, c := range f.Comments { + // if there is a comment in the declaration, it is not considered empty + if g.Pos() <= c.Pos() && c.End() <= g.End() { + return false + } + } + + return true +} diff --git a/vendor/mvdan.cc/gofumpt/version.go b/vendor/mvdan.cc/gofumpt/version.go new file mode 100644 index 0000000000..b70ee78561 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/version.go @@ -0,0 +1,28 @@ +// Copyright (c) 2020, Daniel Martí +// See LICENSE for licensing information + +package main + +import ( + "flag" + "fmt" + "runtime/debug" +) + +var ( + showVersion = flag.Bool("version", false, "show version and exit") + + version = "(devel)" // to match the default from runtime/debug +) + +func printVersion() { + // don't overwrite the version if it was set by -ldflags=-X + if info, ok := debug.ReadBuildInfo(); ok && version == "(devel)" { + mod := &info.Main + if mod.Replace != nil { + mod = mod.Replace + } + version = mod.Version + } + fmt.Println(version) +}
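One detail worth calling out from the vendored `gofmt.go` above: when the `-lang` flag is empty, `processFile` asks the surrounding module for its Go version by running `go list -m -f {{.GoVersion}}`, and only falls back to the format package's conservative "v1" default if that lookup fails. A standalone sketch of that lookup under the same assumptions (the helper name and the small `main` wrapper are made up for illustration):

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// detectLangVersion mirrors the fallback in processFile: with no -lang flag,
// the current module's Go version is read via `go list`. (Illustrative helper,
// not part of the vendored code.)
func detectLangVersion() string {
	out, err := exec.Command("go", "list", "-m", "-f", "{{.GoVersion}}").Output()
	out = bytes.TrimSpace(out)
	if err != nil || len(out) == 0 {
		return "" // format.File then treats the empty version as "v1"
	}
	return string(out)
}

func main() {
	fmt.Println("detected Go language version:", detectLangVersion())
}
```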