Bundle MongoDB Atlas (#8309)

Author: Jim Kalafut
Date: 2020-02-07 14:09:39 -08:00
Committed by: GitHub
Parent: 62be2a0e37
Commit: 264c446de6
106 changed files with 9175 additions and 8447 deletions


@@ -367,6 +367,8 @@ func TestPredict_Plugins(t *testing.T) {
"ldap",
"mongodb",
"mongodb-database-plugin",
"mongodbatlas",
"mongodbatlas-database-plugin",
"mssql",
"mssql-database-plugin",
"mysql",
@@ -412,7 +414,7 @@ func TestPredict_Plugins(t *testing.T) {
}
}
if !reflect.DeepEqual(act, tc.exp) {
t.Errorf("expected %q to be %q", act, tc.exp)
t.Errorf("expected:%q, got: %q", tc.exp, act)
}
})
}
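
The reworked assertion above puts the expected value first and the actual value second, which reads more naturally when the test fails. A minimal standalone sketch of the same failure-message format follows; the plugin lists here are made up for illustration and are not the test's real fixtures.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Hypothetical expected/actual plugin name lists, mirroring the
	// test's "expected:%q, got: %q" failure format.
	exp := []string{"mongodb", "mongodbatlas", "mongodbatlas-database-plugin"}
	act := []string{"mongodb"}
	if !reflect.DeepEqual(act, exp) {
		fmt.Printf("expected:%q, got: %q\n", exp, act)
	}
}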

go.mod (10 changes)

@@ -29,7 +29,6 @@ require (
github.com/cockroachdb/apd v1.1.0 // indirect
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
github.com/coreos/go-semver v0.2.0
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d // indirect
github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a
github.com/dnaeon/go-vcr v1.0.1 // indirect
github.com/dsnet/compress v0.0.1 // indirect
@@ -43,14 +42,12 @@ require (
github.com/go-ldap/ldap/v3 v3.1.3
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-sql-driver/mysql v1.4.1
github.com/go-stack/stack v1.8.0 // indirect
github.com/go-test/deep v1.0.2
github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df
github.com/gogo/protobuf v1.2.1
github.com/golang/protobuf v1.3.2
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a
github.com/google/go-querystring v1.0.0 // indirect
github.com/hashicorp/consul-template v0.22.0
github.com/hashicorp/consul/api v1.2.1-0.20200128105449-6681be918a6e
github.com/hashicorp/errwrap v1.0.0
@@ -81,14 +78,16 @@ require (
github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.2-0.20190925162726-2e5b0b8184e6
github.com/hashicorp/vault-plugin-auth-oci v0.0.0-20190904175623-97c0c0187c5c
github.com/hashicorp/vault-plugin-database-elasticsearch v0.0.0-20190814210117-e079e01fbb93
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.0.0-20200124191841-a128333dc5d5
github.com/hashicorp/vault-plugin-secrets-ad v0.6.2
github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190814210129-4d18bec92f56
github.com/hashicorp/vault-plugin-secrets-azure v0.5.3-0.20191119150734-45c076c82f1d
github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191128235941-556b7c55d6db
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190814210149-315cdbf5de6e
github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20191017213228-e8cf7060a4d0
- github.com/hashicorp/vault/api v1.0.5-0.20200111014044-ba76c080ad1f
- github.com/hashicorp/vault/sdk v0.1.14-0.20200123192413-777c45062569
+ github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.0.0-20200124190647-0026e6bed4fb
+ github.com/hashicorp/vault/api v1.0.5-0.20200124174203-d7d4084c8670
+ github.com/hashicorp/vault/sdk v0.1.14-0.20200124174203-d7d4084c8670
github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
github.com/jackc/pgx v3.3.0+incompatible // indirect
@@ -98,7 +97,6 @@ require (
github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869
github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f
github.com/kr/pretty v0.1.0
github.com/kr/pty v1.1.3 // indirect
github.com/kr/text v0.1.0
github.com/lib/pq v1.2.0
github.com/mattn/go-colorable v0.1.4

go.sum (11 changes)

@@ -59,6 +59,8 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEV
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE=
github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a h1:KFHLI4QGttB0i7M3qOkAo8Zn/GSsxwwCnInFqBaYtkM=
github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod h1:D73UAuEPckrDorYZdtlCu2ySOLuPB5W4rhIkmmc/XbI=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
@@ -219,6 +221,7 @@ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZp
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
@@ -418,6 +421,8 @@ github.com/hashicorp/vault-plugin-auth-oci v0.0.0-20190904175623-97c0c0187c5c h1
github.com/hashicorp/vault-plugin-auth-oci v0.0.0-20190904175623-97c0c0187c5c/go.mod h1:YAl51RsYRihPbSdnug1NsvutzbRVfrZ12FjEIvSiOTs=
github.com/hashicorp/vault-plugin-database-elasticsearch v0.0.0-20190814210117-e079e01fbb93 h1:kXTV1ImOPgDGZxAlbEQfiXgnZY/34vfgnZVhI/tscmg=
github.com/hashicorp/vault-plugin-database-elasticsearch v0.0.0-20190814210117-e079e01fbb93/go.mod h1:N9XpfMXjeLHBgUd8iy4avOC4mCSqUC7B/R8AtCYhcfE=
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.0.0-20200124191841-a128333dc5d5 h1:QcgGRCPzwvyIv3xTUlnTVDKdy/K37VNK28RfNtd8YG0=
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.0.0-20200124191841-a128333dc5d5/go.mod h1:pAqbRxavgtKO0VR8AD9jRRFc4VldXGbXA8WSXo9L+oY=
github.com/hashicorp/vault-plugin-secrets-ad v0.6.2 h1:gz3TolHTqE+HitO5FXwrDhXeZy9G5L3wR1h4DMtEV4Q=
github.com/hashicorp/vault-plugin-secrets-ad v0.6.2/go.mod h1:beQFPnMIABuWeeCVPFbSAWmJgm3kcGqpswGOGOiJjYc=
github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190814210129-4d18bec92f56 h1:PGE26//x1eiAbZ1ExffhKa4y9xgDKLd9BHDZRkOzbEY=
@@ -430,6 +435,8 @@ github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190814210149-315cdbf
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190814210149-315cdbf5de6e/go.mod h1:5prAHuCcBiyv+xfGBviTVYeDQUhmQYN7WrxC2gMRWeQ=
github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20191017213228-e8cf7060a4d0 h1:w4qR/yfqWOYmncR1HK1CVU7iHkqgcf0USWtbp/fTHM4=
github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20191017213228-e8cf7060a4d0/go.mod h1:H0VKQagsJoK9o2qpULMgbspuWVnFe3G4S/K7f0Dr8qY=
github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.0.0-20200124190647-0026e6bed4fb h1:mKgSR5EimaRgFzdV1ld5+k1l+zzpJ3x7T+eTHNxxKuo=
github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.0.0-20200124190647-0026e6bed4fb/go.mod h1:O+csRy1tVsA/LEvBUgRNM3GUml6TFnVBld+HRciLAdg=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
@@ -550,6 +557,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mongodb/go-client-mongodb-atlas v0.1.2 h1:qmUme1TlQBPZupmXMnpD8DxnfGXLVGs3w+0Z17HBiSA=
github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF10yY3tJM+1xATXMJ3oU35LU=
github.com/mwielbut/pointy v1.1.0 h1:U5/YEfoIkaGCHv0St3CgjduqXID4FNRoyZgLM1kY9vg=
github.com/mwielbut/pointy v1.1.0/go.mod h1:MvvO+uMFj9T5DMda33HlvogsFBX7pWWKAkFIn4teYwY=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=


@@ -1,10 +1,6 @@
package builtinplugins
import (
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/logical"
credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud"
credAzure "github.com/hashicorp/vault-plugin-auth-azure"
credCentrify "github.com/hashicorp/vault-plugin-auth-centrify"
@@ -14,6 +10,8 @@ import (
credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch"
dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
credAppId "github.com/hashicorp/vault/builtin/credential/app-id"
credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
credAws "github.com/hashicorp/vault/builtin/credential/aws"
@@ -23,8 +21,6 @@ import (
credOkta "github.com/hashicorp/vault/builtin/credential/okta"
credRadius "github.com/hashicorp/vault/builtin/credential/radius"
credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch"
dbCass "github.com/hashicorp/vault/plugins/database/cassandra"
dbHana "github.com/hashicorp/vault/plugins/database/hana"
dbInflux "github.com/hashicorp/vault/plugins/database/influxdb"
@@ -32,6 +28,9 @@ import (
dbMssql "github.com/hashicorp/vault/plugins/database/mssql"
dbMysql "github.com/hashicorp/vault/plugins/database/mysql"
dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/logical"
logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin"
logicalAlicloud "github.com/hashicorp/vault-plugin-secrets-alicloud"
@@ -39,6 +38,7 @@ import (
logicalGcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin"
logicalGcpKms "github.com/hashicorp/vault-plugin-secrets-gcpkms"
logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas"
logicalAws "github.com/hashicorp/vault/builtin/logical/aws"
logicalCass "github.com/hashicorp/vault/builtin/logical/cassandra"
logicalConsul "github.com/hashicorp/vault/builtin/logical/consul"
@@ -100,6 +100,7 @@ func newRegistry() *registry {
"mssql-database-plugin": dbMssql.New,
"cassandra-database-plugin": dbCass.New,
"mongodb-database-plugin": dbMongo.New,
"mongodbatlas-database-plugin": dbMongoAtlas.New,
"hana-database-plugin": dbHana.New,
"influxdb-database-plugin": dbInflux.New,
"elasticsearch-database-plugin": dbElastic.New,
@@ -115,6 +116,7 @@ func newRegistry() *registry {
"gcpkms": logicalGcpKms.Factory,
"kv": logicalKv.Factory,
"mongodb": logicalMongo.Factory,
"mongodbatlas": logicalMongoAtlas.Factory,
"mssql": logicalMssql.Factory,
"mysql": logicalMysql.Factory,
"nomad": logicalNomad.Factory,


@@ -1841,6 +1841,7 @@ func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string {
"mssql-database-plugin",
"cassandra-database-plugin",
"mongodb-database-plugin",
"mongodbatlas-database-plugin",
"hana-database-plugin",
"influxdb-database-plugin",
}

vendor/github.com/Sectorbob/mlab-ns2/CONTRIBUTORS (generated, vendored, new file; 17 lines)

@@ -0,0 +1,17 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the mlab-ns2
# repository.
#
# Names should be added to this file like so:
# Name <email address>
#
# An entry with two email addresses specifies that the
# first address should be used in the submit logs and
# that the second address should be recognized as the
# same person when interacting with Rietveld.
# Please keep the list sorted.
Bipasa Chattopadhyay <bipasa@cs.unc.edu>
Eric Gavaletz <gavaletz@gmail.com>
Seon-Wook Park <seon.wook@swook.net>

vendor/github.com/Sectorbob/mlab-ns2/COPYING (generated, vendored, new file; 177 lines)

@@ -0,0 +1,177 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -0,0 +1,284 @@
// Copyright 2013 M-Lab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The digest package provides an implementation of http.RoundTripper that takes
// care of HTTP Digest Authentication (http://www.ietf.org/rfc/rfc2617.txt).
// This only implements the MD5 and "auth" portions of the RFC, but that covers
// the majority of available server side implementations including apache web
// server.
//
// Example usage:
//
// t := NewTransport("myUserName", "myP@55w0rd")
// req, err := http.NewRequest("GET", "http://notreal.com/path?arg=1", nil)
// if err != nil {
// return err
// }
// resp, err := t.RoundTrip(req)
// if err != nil {
// return err
// }
//
// OR it can be used as a client:
//
// c, err := t.Client()
// if err != nil {
// return err
// }
// resp, err := c.Get("http://notreal.com/path?arg=1")
// if err != nil {
// return err
// }
//
package digest
import (
"bytes"
"crypto/md5"
"crypto/rand"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
)
var (
ErrNilTransport = errors.New("Transport is nil")
ErrBadChallenge = errors.New("Challenge is bad")
ErrAlgNotImplemented = errors.New("Alg not implemented")
)
// Transport is an implementation of http.RoundTripper that takes care of http
// digest authentication.
type Transport struct {
Username string
Password string
Transport http.RoundTripper
}
// NewTransport creates a new digest transport using the http.DefaultTransport.
func NewTransport(username, password string) *Transport {
t := &Transport{
Username: username,
Password: password,
}
t.Transport = http.DefaultTransport
return t
}
type challenge struct {
Realm string
Domain string
Nonce string
Opaque string
Stale string
Algorithm string
Qop string
}
func parseChallenge(input string) (*challenge, error) {
const ws = " \n\r\t"
const qs = `"`
s := strings.Trim(input, ws)
if !strings.HasPrefix(s, "Digest ") {
return nil, ErrBadChallenge
}
s = strings.Trim(s[7:], ws)
sl := strings.Split(s, ", ")
c := &challenge{
Algorithm: "MD5",
}
var r []string
for i := range sl {
r = strings.SplitN(sl[i], "=", 2)
switch r[0] {
case "realm":
c.Realm = strings.Trim(r[1], qs)
case "domain":
c.Domain = strings.Trim(r[1], qs)
case "nonce":
c.Nonce = strings.Trim(r[1], qs)
case "opaque":
c.Opaque = strings.Trim(r[1], qs)
case "stale":
c.Stale = strings.Trim(r[1], qs)
case "algorithm":
c.Algorithm = strings.Trim(r[1], qs)
case "qop":
//TODO(gavaletz) should be an array of strings?
c.Qop = strings.Trim(r[1], qs)
default:
return nil, ErrBadChallenge
}
}
return c, nil
}
type credentials struct {
Username string
Realm string
Nonce string
DigestURI string
Algorithm string
Cnonce string
Opaque string
MessageQop string
NonceCount int
method string
password string
}
func h(data string) string {
hf := md5.New()
io.WriteString(hf, data)
return fmt.Sprintf("%x", hf.Sum(nil))
}
func kd(secret, data string) string {
return h(fmt.Sprintf("%s:%s", secret, data))
}
func (c *credentials) ha1() string {
return h(fmt.Sprintf("%s:%s:%s", c.Username, c.Realm, c.password))
}
func (c *credentials) ha2() string {
return h(fmt.Sprintf("%s:%s", c.method, c.DigestURI))
}
func (c *credentials) resp(cnonce string) (string, error) {
c.NonceCount++
if c.MessageQop == "auth" {
if cnonce != "" {
c.Cnonce = cnonce
} else {
b := make([]byte, 8)
io.ReadFull(rand.Reader, b)
c.Cnonce = fmt.Sprintf("%x", b)[:16]
}
return kd(c.ha1(), fmt.Sprintf("%s:%08x:%s:%s:%s",
c.Nonce, c.NonceCount, c.Cnonce, c.MessageQop, c.ha2())), nil
} else if c.MessageQop == "" {
return kd(c.ha1(), fmt.Sprintf("%s:%s", c.Nonce, c.ha2())), nil
}
return "", ErrAlgNotImplemented
}
func (c *credentials) authorize() (string, error) {
// Note that this is only implemented for MD5 and NOT MD5-sess.
// MD5-sess is rarely supported and those that do are a big mess.
if c.Algorithm != "MD5" {
return "", ErrAlgNotImplemented
}
// Note that this is NOT implemented for "qop=auth-int". Similarly the
// auth-int server side implementations that do exist are a mess.
if c.MessageQop != "auth" && c.MessageQop != "" {
return "", ErrAlgNotImplemented
}
resp, err := c.resp("")
if err != nil {
return "", ErrAlgNotImplemented
}
sl := []string{fmt.Sprintf(`username="%s"`, c.Username)}
sl = append(sl, fmt.Sprintf(`realm="%s"`, c.Realm))
sl = append(sl, fmt.Sprintf(`nonce="%s"`, c.Nonce))
sl = append(sl, fmt.Sprintf(`uri="%s"`, c.DigestURI))
sl = append(sl, fmt.Sprintf(`response="%s"`, resp))
if c.Algorithm != "" {
sl = append(sl, fmt.Sprintf(`algorithm="%s"`, c.Algorithm))
}
if c.Opaque != "" {
sl = append(sl, fmt.Sprintf(`opaque="%s"`, c.Opaque))
}
if c.MessageQop != "" {
sl = append(sl, fmt.Sprintf("qop=%s", c.MessageQop))
sl = append(sl, fmt.Sprintf("nc=%08x", c.NonceCount))
sl = append(sl, fmt.Sprintf(`cnonce="%s"`, c.Cnonce))
}
return fmt.Sprintf("Digest %s", strings.Join(sl, ", ")), nil
}
func (t *Transport) newCredentials(req *http.Request, c *challenge) *credentials {
return &credentials{
Username: t.Username,
Realm: c.Realm,
Nonce: c.Nonce,
DigestURI: req.URL.RequestURI(),
Algorithm: c.Algorithm,
Opaque: c.Opaque,
MessageQop: c.Qop, // "auth" must be a single value
NonceCount: 0,
method: req.Method,
password: t.Password,
}
}
// RoundTrip makes a request expecting a 401 response that will require digest
// authentication. It creates the credentials it needs and makes a follow-up
// request.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
if t.Transport == nil {
return nil, ErrNilTransport
}
// Cache Original Body
var cachedBody []byte
if req.Body != nil {
cachedBody, _ = ioutil.ReadAll(req.Body)
req.Body = ioutil.NopCloser(bytes.NewReader(cachedBody))
}
// Copy the request so we don't modify the input.
req2 := new(http.Request)
*req2 = *req
req2.Header = make(http.Header)
for k, s := range req.Header {
req2.Header[k] = s
}
// Make a request to get the 401 that contains the challenge.
resp, err := t.Transport.RoundTrip(req)
if err != nil || resp.StatusCode != 401 {
return resp, err
}
chal := resp.Header.Get("WWW-Authenticate")
c, err := parseChallenge(chal)
if err != nil {
return resp, err
}
// Form credentials based on the challenge.
cr := t.newCredentials(req2, c)
auth, err := cr.authorize()
if err != nil {
return resp, err
}
// Make authenticated request.
req2.Header.Set("Authorization", auth)
if len(cachedBody) > 0 {
req2.Body = ioutil.NopCloser(bytes.NewReader(cachedBody))
}
return t.Transport.RoundTrip(req2)
}
// Client returns an HTTP client that uses the digest transport.
func (t *Transport) Client() (*http.Client, error) {
if t.Transport == nil {
return nil, ErrNilTransport
}
return &http.Client{Transport: t}, nil
}
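
For reference, a short usage sketch of the vendored digest Transport above, mirroring the client example in its package comment. The import path is assumed to be github.com/Sectorbob/mlab-ns2/gae/ns/digest (the file path is not shown in this view), and the URL and credentials are placeholders.

package main

import (
	"fmt"
	"log"

	// Assumed import path for the vendored digest package shown above.
	"github.com/Sectorbob/mlab-ns2/gae/ns/digest"
)

func main() {
	t := digest.NewTransport("myUserName", "myP@55w0rd")

	// Client wraps the digest Transport in a standard *http.Client.
	c, err := t.Client()
	if err != nil {
		log.Fatal(err)
	}

	resp, err := c.Get("http://notreal.com/path?arg=1") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}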


@@ -0,0 +1,35 @@
// Copyright 2013 M-Lab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build appengine
package digest
import (
"appengine"
"appengine/urlfetch"
)
// GAETransport returns an implementation of http.RoundTripper that uses the GAE
// urlfetch.Transport and is capable of HTTP digest authentication.
func GAETransport(c appengine.Context, username, password string) *Transport {
t := &Transport{
Username: username,
Password: password,
}
t.Transport = &urlfetch.Transport{
Context: c,
}
return t
}


@@ -1,115 +0,0 @@
// +build ignore
package main
import (
"bytes"
"go/format"
"io/ioutil"
"log"
"net/http"
"sort"
"strings"
"text/template"
"time"
"gopkg.in/yaml.v2"
)
type CFCode int
type HTTPCode int
type Definition struct {
CFCode `yaml:"-"`
Name string `yaml:"name"`
HTTPCode `yaml:"http_code"`
Message string `yaml:"message"`
}
func main() {
const url = "https://raw.githubusercontent.com/cloudfoundry/cloud_controller_ng/master/vendor/errors/v2.yml"
resp, err := http.Get(url)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var m map[CFCode]Definition
if err := yaml.Unmarshal(body, &m); err != nil {
log.Fatal(err)
}
var definitions []Definition
for c, d := range m {
d.CFCode = c
definitions = append(definitions, d)
}
sort.Slice(definitions, func(i, j int) bool {
return definitions[i].CFCode < definitions[j].CFCode
})
buf := &bytes.Buffer{}
if err := packageTemplate.Execute(buf, struct {
Timestamp time.Time
Definitions []Definition
}{
Timestamp: time.Now(),
Definitions: definitions,
}); err != nil {
log.Fatal(err)
}
dst, err := format.Source(buf.Bytes())
if err != nil {
log.Printf("%s", buf.Bytes())
log.Fatal(err)
}
if err := ioutil.WriteFile("cf_error.go", dst, 0600); err != nil {
log.Fatal(err)
}
}
// destutter ensures that s does not end in "Error".
func destutter(s string) string {
return strings.TrimSuffix(s, "Error")
}
var packageTemplate = template.Must(template.New("").Funcs(template.FuncMap{
"destutter": destutter,
}).Parse(`
package cfclient
// Code generated by go generate. DO NOT EDIT.
// This file was generated by robots at
// {{ .Timestamp }}
import "github.com/pkg/errors"
{{- range .Definitions }}
{{$method := printf "Is%sError" (.Name | destutter) }}
// {{ $method }} returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: {{ .CFCode }}
// - HTTP code: {{ .HTTPCode }}
// - message: {{ printf "%q" .Message }}
func Is{{ .Name | destutter }}Error(err error) bool {
cause := errors.Cause(err)
cferr, ok := cause.(CloudFoundryError)
if !ok {
return false
}
return cferr.Code == {{ .CFCode }}
}
{{- end }}
`))


@@ -1,703 +0,0 @@
// Copyright 2017, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build ignore
package main
import (
"bytes"
"go/format"
"io/ioutil"
"log"
"os"
"text/template"
)
func main() {
if len(os.Args) != 3 {
log.Fatalf("Usage: %s GO_TYPE OUTPUT_FILE", os.Args[0])
}
typ := os.Args[1]
path := os.Args[2]
b := new(bytes.Buffer)
t := template.Must(template.New("source").Parse(source))
if err := t.Execute(b, struct {
Type, GeneratedMessage string
}{typ, "// Code generated by sais_gen.go. DO NOT EDIT."}); err != nil {
log.Fatalf("Template.Execute error: %v", err)
}
out, err := format.Source(bytes.TrimSpace(b.Bytes()))
if err != nil {
log.Fatalf("format.Source error: %v", err)
}
if err := ioutil.WriteFile(path, out, 0644); err != nil {
log.Fatalf("ioutil.WriteFile error: %v", err)
}
}
const source = `
// Copyright 2015, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
{{.GeneratedMessage}}
// ====================================================
// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
// ====================================================
package sais
func getCounts_{{.Type}}(T []{{.Type}}, C []int, n, k int) {
var i int
for i = 0; i < k; i++ {
C[i] = 0
}
for i = 0; i < n; i++ {
C[T[i]]++
}
}
func getBuckets_{{.Type}}(C, B []int, k int, end bool) {
var i, sum int
if end {
for i = 0; i < k; i++ {
sum += C[i]
B[i] = sum
}
} else {
for i = 0; i < k; i++ {
sum += C[i]
B[i] = sum - C[i]
}
}
}
func sortLMS1_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) {
var b, i, j int
var c0, c1 int
// Compute SAl.
if &C[0] == &B[0] {
getCounts_{{.Type}}(T, C, n, k)
}
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
j = n - 1
c1 = int(T[j])
b = B[c1]
j--
if int(T[j]) < c1 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
for i = 0; i < n; i++ {
if j = SA[i]; j > 0 {
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
j--
if int(T[j]) < c1 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
SA[i] = 0
} else if j < 0 {
SA[i] = ^j
}
}
// Compute SAs.
if &C[0] == &B[0] {
getCounts_{{.Type}}(T, C, n, k)
}
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
c1 = 0
b = B[c1]
for i = n - 1; i >= 0; i-- {
if j = SA[i]; j > 0 {
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
j--
b--
if int(T[j]) > c1 {
SA[b] = ^(j + 1)
} else {
SA[b] = j
}
SA[i] = 0
}
}
}
func postProcLMS1_{{.Type}}(T []{{.Type}}, SA []int, n, m int) int {
var i, j, p, q, plen, qlen, name int
var c0, c1 int
var diff bool
// Compact all the sorted substrings into the first m items of SA.
// 2*m must be not larger than n (provable).
for i = 0; SA[i] < 0; i++ {
SA[i] = ^SA[i]
}
if i < m {
for j, i = i, i+1; ; i++ {
if p = SA[i]; p < 0 {
SA[j] = ^p
j++
SA[i] = 0
if j == m {
break
}
}
}
}
// Store the length of all substrings.
i = n - 1
j = n - 1
c0 = int(T[n-1])
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
for i >= 0 {
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 > c1 {
break
}
}
if i >= 0 {
SA[m+((i+1)>>1)] = j - i
j = i + 1
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
}
}
// Find the lexicographic names of all substrings.
name = 0
qlen = 0
for i, q = 0, n; i < m; i++ {
p = SA[i]
plen = SA[m+(p>>1)]
diff = true
if (plen == qlen) && ((q + plen) < n) {
for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ {
}
if j == plen {
diff = false
}
}
if diff {
name++
q = p
qlen = plen
}
SA[m+(p>>1)] = name
}
return name
}
func sortLMS2_{{.Type}}(T []{{.Type}}, SA, C, B, D []int, n, k int) {
var b, i, j, t, d int
var c0, c1 int
// Compute SAl.
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
j = n - 1
c1 = int(T[j])
b = B[c1]
j--
if int(T[j]) < c1 {
t = 1
} else {
t = 0
}
j += n
if t&1 > 0 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
for i, d = 0, 0; i < n; i++ {
if j = SA[i]; j > 0 {
if n <= j {
d += 1
j -= n
}
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
j--
t = int(c0) << 1
if int(T[j]) < c1 {
t |= 1
}
if D[t] != d {
j += n
D[t] = d
}
if t&1 > 0 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
SA[i] = 0
} else if j < 0 {
SA[i] = ^j
}
}
for i = n - 1; 0 <= i; i-- {
if SA[i] > 0 {
if SA[i] < n {
SA[i] += n
for j = i - 1; SA[j] < n; j-- {
}
SA[j] -= n
i = j
}
}
}
// Compute SAs.
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
c1 = 0
b = B[c1]
for i, d = n-1, d+1; i >= 0; i-- {
if j = SA[i]; j > 0 {
if n <= j {
d += 1
j -= n
}
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
j--
t = int(c0) << 1
if int(T[j]) > c1 {
t |= 1
}
if D[t] != d {
j += n
D[t] = d
}
b--
if t&1 > 0 {
SA[b] = ^(j + 1)
} else {
SA[b] = j
}
SA[i] = 0
}
}
}
func postProcLMS2_{{.Type}}(SA []int, n, m int) int {
var i, j, d, name int
// Compact all the sorted LMS substrings into the first m items of SA.
name = 0
for i = 0; SA[i] < 0; i++ {
j = ^SA[i]
if n <= j {
name += 1
}
SA[i] = j
}
if i < m {
for d, i = i, i+1; ; i++ {
if j = SA[i]; j < 0 {
j = ^j
if n <= j {
name += 1
}
SA[d] = j
d++
SA[i] = 0
if d == m {
break
}
}
}
}
if name < m {
// Store the lexicographic names.
for i, d = m-1, name+1; 0 <= i; i-- {
if j = SA[i]; n <= j {
j -= n
d--
}
SA[m+(j>>1)] = d
}
} else {
// Unset flags.
for i = 0; i < m; i++ {
if j = SA[i]; n <= j {
j -= n
SA[i] = j
}
}
}
return name
}
func induceSA_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) {
var b, i, j int
var c0, c1 int
// Compute SAl.
if &C[0] == &B[0] {
getCounts_{{.Type}}(T, C, n, k)
}
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
j = n - 1
c1 = int(T[j])
b = B[c1]
if j > 0 && int(T[j-1]) < c1 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
for i = 0; i < n; i++ {
j = SA[i]
SA[i] = ^j
if j > 0 {
j--
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
if j > 0 && int(T[j-1]) < c1 {
SA[b] = ^j
} else {
SA[b] = j
}
b++
}
}
// Compute SAs.
if &C[0] == &B[0] {
getCounts_{{.Type}}(T, C, n, k)
}
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
c1 = 0
b = B[c1]
for i = n - 1; i >= 0; i-- {
if j = SA[i]; j > 0 {
j--
if c0 = int(T[j]); c0 != c1 {
B[c1] = b
c1 = c0
b = B[c1]
}
b--
if (j == 0) || (int(T[j-1]) > c1) {
SA[b] = ^j
} else {
SA[b] = j
}
} else {
SA[i] = ^j
}
}
}
func computeSA_{{.Type}}(T []{{.Type}}, SA []int, fs, n, k int) {
const (
minBucketSize = 512
sortLMS2Limit = 0x3fffffff
)
var C, B, D, RA []int
var bo int // Offset of B relative to SA
var b, i, j, m, p, q, name, newfs int
var c0, c1 int
var flags uint
if k <= minBucketSize {
C = make([]int, k)
if k <= fs {
bo = n + fs - k
B = SA[bo:]
flags = 1
} else {
B = make([]int, k)
flags = 3
}
} else if k <= fs {
C = SA[n+fs-k:]
if k <= fs-k {
bo = n + fs - 2*k
B = SA[bo:]
flags = 0
} else if k <= 4*minBucketSize {
B = make([]int, k)
flags = 2
} else {
B = C
flags = 8
}
} else {
C = make([]int, k)
B = C
flags = 4 | 8
}
if n <= sortLMS2Limit && 2 <= (n/k) {
if flags&1 > 0 {
if 2*k <= fs-k {
flags |= 32
} else {
flags |= 16
}
} else if flags == 0 && 2*k <= (fs-2*k) {
flags |= 32
}
}
// Stage 1: Reduce the problem by at least 1/2.
// Sort all the LMS-substrings.
getCounts_{{.Type}}(T, C, n, k)
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
for i = 0; i < n; i++ {
SA[i] = 0
}
b = -1
i = n - 1
j = n
m = 0
c0 = int(T[n-1])
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
for i >= 0 {
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 > c1 {
break
}
}
if i >= 0 {
if b >= 0 {
SA[b] = j
}
B[c1]--
b = B[c1]
j = i
m++
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
}
}
if m > 1 {
if flags&(16|32) > 0 {
if flags&16 > 0 {
D = make([]int, 2*k)
} else {
D = SA[bo-2*k:]
}
B[T[j+1]]++
for i, j = 0, 0; i < k; i++ {
j += C[i]
if B[i] != j {
SA[B[i]] += n
}
D[i] = 0
D[i+k] = 0
}
sortLMS2_{{.Type}}(T, SA, C, B, D, n, k)
name = postProcLMS2_{{.Type}}(SA, n, m)
} else {
sortLMS1_{{.Type}}(T, SA, C, B, n, k)
name = postProcLMS1_{{.Type}}(T, SA, n, m)
}
} else if m == 1 {
SA[b] = j + 1
name = 1
} else {
name = 0
}
// Stage 2: Solve the reduced problem.
// Recurse if names are not yet unique.
if name < m {
newfs = n + fs - 2*m
if flags&(1|4|8) == 0 {
if k+name <= newfs {
newfs -= k
} else {
flags |= 8
}
}
RA = SA[m+newfs:]
for i, j = m+(n>>1)-1, m-1; m <= i; i-- {
if SA[i] != 0 {
RA[j] = SA[i] - 1
j--
}
}
computeSA_int(RA, SA, newfs, m, name)
i = n - 1
j = m - 1
c0 = int(T[n-1])
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
for i >= 0 {
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 > c1 {
break
}
}
if i >= 0 {
RA[j] = i + 1
j--
for {
c1 = c0
if i--; i < 0 {
break
}
if c0 = int(T[i]); c0 < c1 {
break
}
}
}
}
for i = 0; i < m; i++ {
SA[i] = RA[SA[i]]
}
if flags&4 > 0 {
B = make([]int, k)
C = B
}
if flags&2 > 0 {
B = make([]int, k)
}
}
// Stage 3: Induce the result for the original problem.
if flags&8 > 0 {
getCounts_{{.Type}}(T, C, n, k)
}
// Put all left-most S characters into their buckets.
if m > 1 {
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
i = m - 1
j = n
p = SA[m-1]
c1 = int(T[p])
for {
c0 = c1
q = B[c0]
for q < j {
j--
SA[j] = 0
}
for {
j--
SA[j] = p
if i--; i < 0 {
break
}
p = SA[i]
if c1 = int(T[p]); c1 != c0 {
break
}
}
if i < 0 {
break
}
}
for j > 0 {
j--
SA[j] = 0
}
}
induceSA_{{.Type}}(T, SA, C, B, n, k)
}
`


@@ -1,332 +0,0 @@
// Copyright 2017 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// gen-accessors generates accessor methods for structs with pointer fields.
//
// It is meant to be used by the go-github authors in conjunction with the
// go generate tool before sending a commit to GitHub.
package main
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"sort"
"strings"
"text/template"
)
const (
fileSuffix = "-accessors.go"
)
var (
verbose = flag.Bool("v", false, "Print verbose log messages")
sourceTmpl = template.Must(template.New("source").Parse(source))
// blacklistStructMethod lists "struct.method" combos to skip.
blacklistStructMethod = map[string]bool{
"RepositoryContent.GetContent": true,
"Client.GetBaseURL": true,
"Client.GetUploadURL": true,
"ErrorResponse.GetResponse": true,
"RateLimitError.GetResponse": true,
"AbuseRateLimitError.GetResponse": true,
}
// blacklistStruct lists structs to skip.
blacklistStruct = map[string]bool{
"Client": true,
}
)
func logf(fmt string, args ...interface{}) {
if *verbose {
log.Printf(fmt, args...)
}
}
func main() {
flag.Parse()
fset := token.NewFileSet()
pkgs, err := parser.ParseDir(fset, ".", sourceFilter, 0)
if err != nil {
log.Fatal(err)
return
}
for pkgName, pkg := range pkgs {
t := &templateData{
filename: pkgName + fileSuffix,
Year: 2017,
Package: pkgName,
Imports: map[string]string{},
}
for filename, f := range pkg.Files {
logf("Processing %v...", filename)
if err := t.processAST(f); err != nil {
log.Fatal(err)
}
}
if err := t.dump(); err != nil {
log.Fatal(err)
}
}
logf("Done.")
}
func (t *templateData) processAST(f *ast.File) error {
for _, decl := range f.Decls {
gd, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if !ok {
continue
}
// Skip unexported identifiers.
if !ts.Name.IsExported() {
logf("Struct %v is unexported; skipping.", ts.Name)
continue
}
// Check if the struct is blacklisted.
if blacklistStruct[ts.Name.Name] {
logf("Struct %v is blacklisted; skipping.", ts.Name)
continue
}
st, ok := ts.Type.(*ast.StructType)
if !ok {
continue
}
for _, field := range st.Fields.List {
se, ok := field.Type.(*ast.StarExpr)
if len(field.Names) == 0 || !ok {
continue
}
fieldName := field.Names[0]
// Skip unexported identifiers.
if !fieldName.IsExported() {
logf("Field %v is unexported; skipping.", fieldName)
continue
}
// Check if "struct.method" is blacklisted.
if key := fmt.Sprintf("%v.Get%v", ts.Name, fieldName); blacklistStructMethod[key] {
logf("Method %v is blacklisted; skipping.", key)
continue
}
switch x := se.X.(type) {
case *ast.ArrayType:
t.addArrayType(x, ts.Name.String(), fieldName.String())
case *ast.Ident:
t.addIdent(x, ts.Name.String(), fieldName.String())
case *ast.MapType:
t.addMapType(x, ts.Name.String(), fieldName.String())
case *ast.SelectorExpr:
t.addSelectorExpr(x, ts.Name.String(), fieldName.String())
default:
logf("processAST: type %q, field %q, unknown %T: %+v", ts.Name, fieldName, x, x)
}
}
}
}
return nil
}
func sourceFilter(fi os.FileInfo) bool {
return !strings.HasSuffix(fi.Name(), "_test.go") && !strings.HasSuffix(fi.Name(), fileSuffix)
}
func (t *templateData) dump() error {
if len(t.Getters) == 0 {
logf("No getters for %v; skipping.", t.filename)
return nil
}
// Sort getters by ReceiverType.FieldName.
sort.Sort(byName(t.Getters))
var buf bytes.Buffer
if err := sourceTmpl.Execute(&buf, t); err != nil {
return err
}
clean, err := format.Source(buf.Bytes())
if err != nil {
return err
}
logf("Writing %v...", t.filename)
return ioutil.WriteFile(t.filename, clean, 0644)
}
func newGetter(receiverType, fieldName, fieldType, zeroValue string, namedStruct bool) *getter {
return &getter{
sortVal: strings.ToLower(receiverType) + "." + strings.ToLower(fieldName),
ReceiverVar: strings.ToLower(receiverType[:1]),
ReceiverType: receiverType,
FieldName: fieldName,
FieldType: fieldType,
ZeroValue: zeroValue,
NamedStruct: namedStruct,
}
}
func (t *templateData) addArrayType(x *ast.ArrayType, receiverType, fieldName string) {
var eltType string
switch elt := x.Elt.(type) {
case *ast.Ident:
eltType = elt.String()
default:
logf("addArrayType: type %q, field %q: unknown elt type: %T %+v; skipping.", receiverType, fieldName, elt, elt)
return
}
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, "[]"+eltType, "nil", false))
}
func (t *templateData) addIdent(x *ast.Ident, receiverType, fieldName string) {
var zeroValue string
var namedStruct = false
switch x.String() {
case "int", "int64":
zeroValue = "0"
case "string":
zeroValue = `""`
case "bool":
zeroValue = "false"
case "Timestamp":
zeroValue = "Timestamp{}"
default:
zeroValue = "nil"
namedStruct = true
}
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, x.String(), zeroValue, namedStruct))
}
func (t *templateData) addMapType(x *ast.MapType, receiverType, fieldName string) {
var keyType string
switch key := x.Key.(type) {
case *ast.Ident:
keyType = key.String()
default:
logf("addMapType: type %q, field %q: unknown key type: %T %+v; skipping.", receiverType, fieldName, key, key)
return
}
var valueType string
switch value := x.Value.(type) {
case *ast.Ident:
valueType = value.String()
default:
logf("addMapType: type %q, field %q: unknown value type: %T %+v; skipping.", receiverType, fieldName, value, value)
return
}
fieldType := fmt.Sprintf("map[%v]%v", keyType, valueType)
zeroValue := fmt.Sprintf("map[%v]%v{}", keyType, valueType)
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false))
}
func (t *templateData) addSelectorExpr(x *ast.SelectorExpr, receiverType, fieldName string) {
if strings.ToLower(fieldName[:1]) == fieldName[:1] { // Non-exported field.
return
}
var xX string
if xx, ok := x.X.(*ast.Ident); ok {
xX = xx.String()
}
switch xX {
case "time", "json":
if xX == "json" {
t.Imports["encoding/json"] = "encoding/json"
} else {
t.Imports[xX] = xX
}
fieldType := fmt.Sprintf("%v.%v", xX, x.Sel.Name)
zeroValue := fmt.Sprintf("%v.%v{}", xX, x.Sel.Name)
if xX == "time" && x.Sel.Name == "Duration" {
zeroValue = "0"
}
t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue, false))
default:
logf("addSelectorExpr: xX %q, type %q, field %q: unknown x=%+v; skipping.", xX, receiverType, fieldName, x)
}
}
type templateData struct {
filename string
Year int
Package string
Imports map[string]string
Getters []*getter
}
type getter struct {
sortVal string // Lower-case version of "ReceiverType.FieldName".
ReceiverVar string // The one-letter variable name to match the ReceiverType.
ReceiverType string
FieldName string
FieldType string
ZeroValue string
NamedStruct bool // Getter for named struct.
}
type byName []*getter
func (b byName) Len() int { return len(b) }
func (b byName) Less(i, j int) bool { return b[i].sortVal < b[j].sortVal }
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
const source = `// Copyright {{.Year}} The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by gen-accessors; DO NOT EDIT.
package {{.Package}}
{{with .Imports}}
import (
{{- range . -}}
"{{.}}"
{{end -}}
)
{{end}}
{{range .Getters}}
{{if .NamedStruct}}
// Get{{.FieldName}} returns the {{.FieldName}} field.
func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() *{{.FieldType}} {
if {{.ReceiverVar}} == nil {
return {{.ZeroValue}}
}
return {{.ReceiverVar}}.{{.FieldName}}
}
{{else}}
// Get{{.FieldName}} returns the {{.FieldName}} field if it's non-nil, zero value otherwise.
func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() {{.FieldType}} {
if {{.ReceiverVar}} == nil || {{.ReceiverVar}}.{{.FieldName}} == nil {
return {{.ZeroValue}}
}
return *{{.ReceiverVar}}.{{.FieldName}}
}
{{end}}
{{end}}
`


@@ -0,0 +1,54 @@
# Created by https://www.gitignore.io/api/go,macos
# Edit at https://www.gitignore.io/?templates=go,macos
### Go ###
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
vault-plugin-secrets-mongodbatlas
vault-plugin-database-mongodbatlas
vault*
.idea/
# End of https://www.gitignore.io/api/go,macos
#


@@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
View File
@@ -0,0 +1,50 @@
TOOL?=vault-plugin-database-mongodbatlas
TEST?=$$(go list ./... | grep -v /vendor/)
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
EXTERNAL_TOOLS=\
github.com/mitchellh/gox
BUILD_TAGS?=${TOOL}
GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
# bin generates the releasable binaries for this plugin
bin: fmtcheck generate
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
default: dev
# dev creates binaries for testing Vault locally. These are put
# into ./bin/ as well as $GOPATH/bin.
dev: fmtcheck generate
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
# test runs the unit tests and vets the code
test: fmtcheck generate
CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -v -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -count=1 -timeout=20m -parallel=4
testcompile: fmtcheck generate
@for pkg in $(TEST) ; do \
go test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
done
# generate runs `go generate` to build the dynamically generated
# source files.
generate:
go generate $$(go list ./... | grep -v /vendor/)
# bootstrap the build by downloading additional tools
bootstrap:
@for tool in $(EXTERNAL_TOOLS) ; do \
echo "Installing/Updating $$tool" ; \
go get -u $$tool; \
done
fmtcheck:
@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
fmt:
gofmt -w $(GOFMT_FILES)
proto:
protoc *.proto --go_out=plugins=grpc:.
.PHONY: bin default generate test vet bootstrap fmt fmtcheck
View File
@@ -0,0 +1,225 @@
# Vault Plugins: MongoDB Atlas Secrets Engine and Database Secrets Engine for MongoDB Atlas
**IMPORTANT: These plugins are currently under development. Feel free to test them by following the instructions under the Developing section below, but consider them beta until they are verified by HashiCorp. Once verified, released versions will be documented in a CHANGELOG.**
This repository contains two Secrets Engines specific to MongoDB Atlas for use with [HashiCorp Vault](https://github.com/hashicorp/vault).
The first is the MongoDB Atlas Secrets Engine, which generates unique, ephemeral [Programmatic API](https://docs.atlas.mongodb.com/reference/api/apiKeys/) keys for MongoDB Atlas.
The second is an extension of the existing Database Secrets Engine that generates unique, ephemeral, programmatic
MongoDB [Database User](https://docs.atlas.mongodb.com/reference/api/database-users/) credentials in MongoDB Atlas; hence we refer to it as the Database Secrets
Engine for MongoDB Atlas.
The plugins are located in the following directories:
- **MongoDB Atlas Secrets Engine:** `plugins/logical/mongodbatlas/`
- **Database Secrets Engine for MongoDB Atlas plugin:** `plugins/database/mongodbatlas`
**Please note**: HashiCorp takes Vault's security and its users' trust very seriously, as does MongoDB.
If you believe you have found a security issue in Vault or in these plugins, _please responsibly disclose_ it by
contacting us at [security@hashicorp.com](mailto:security@hashicorp.com), and contact MongoDB
directly via [security@mongodb.com](mailto:security@mongodb.com) or by
[opening a ticket](https://jira.mongodb.org/plugins/servlet/samlsso?redirectTo=%2Fbrowse%2FSECURITY) (link is external).
## Quick Links
- [Vault Website](https://www.vaultproject.io)
- [MongoDB Atlas Website](https://www.mongodb.com/cloud/atlas)
- [MongoDB Atlas Secrets Engine Docs](https://www.vaultproject.io/docs/secrets/mongodbatlas/index.html)
- [Database Secrets Engine for MongoDB Atlas](https://www.vaultproject.io/docs/secrets/databases/mongodbatlas.html)
- [Vault Github](https://www.github.com/hashicorp/vault)
- [Vault General Announcement List](https://groups.google.com/forum/#!forum/hashicorp-announce)
- [Vault Discussion List](https://groups.google.com/forum/#!forum/vault-tool)
## Usage
**The following will be accurate after review and approval by HashiCorp, which is in progress. Until then, follow the instructions in the Developing section below:**
These are [Vault-specific plugins (a.k.a. Secrets Engines/Backends)](https://www.vaultproject.io/docs/internals/plugins.html). This guide assumes you have already installed Vault
and have a basic understanding of how Vault works. Otherwise, first read this guide on
how to [get started with Vault](https://www.vaultproject.io/intro/getting-started/install.html).
If you are using Vault 11.0.1 or above, both plugins are packaged with Vault. The MongoDB Atlas Secrets Engine can be enabled by running:
```sh
$ vault secrets enable mongodbatlas
Success! Enabled the mongodbatlas secrets engine at: mongodbatlas/
```
Then write the configuration for the plugin and the lease. For example:
```sh
vault write mongodbatlas/config/root \
public_key="a-public-key" \
private_key="a-private-key"
vault write mongodbatlas/config/lease \
ttl=300 \
max_ttl=4800
```
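With the root configuration and lease in place, the typical next step is to define a role and request credentials from it. The paths and parameter names in the sketch below (`roles/`, `creds/`, `organization_id`, `roles`) are illustrative assumptions rather than something taken from this repository; see the MongoDB Atlas Secrets Engine docs linked above for the authoritative fields.
```sh
# Illustrative sketch only -- parameter names are assumptions, consult the docs.
vault write mongodbatlas/roles/my-role \
    organization_id="an-organization-id" \
    roles="ORG_READ_ONLY"

# Request an ephemeral Programmatic API key for that role.
vault read mongodbatlas/creds/my-role
```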
The Database Secrets Engine for MongoDB Atlas can be enabled by running:
```sh
$ vault secrets enable database
Success! Enabled the database secrets engine at: database/
```
Then write the configuration for the plugin. For example:
```sh
$ vault write database/config/my-mongodbatlas-database \
plugin_name=mongodbatlas-database-plugin \
allowed_roles="my-role" \
public_key="a-public-key" \
private_key="a-private-key" \
project_id="a-project-id"
```
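Once the connection is configured, a role ties it to a creation statement. The creation statement JSON below mirrors what the plugin unmarshals (a `database_name` plus a `roles` array, with role field names following the Atlas client's `Role` type); the role name and TTLs are only a sketch, while the `database/roles` and `database/creds` paths are the standard Database Secrets Engine workflow.
```sh
# Sketch of a role whose creation statement matches the JSON the plugin expects.
vault write database/roles/my-role \
    db_name=my-mongodbatlas-database \
    creation_statements='{"database_name": "admin", "roles": [{"databaseName": "admin", "roleName": "readWrite"}]}' \
    default_ttl="1h" \
    max_ttl="24h"

# Generate ephemeral database user credentials for that role.
vault read database/creds/my-role
```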
If you are testing these plugins with an earlier version of Vault, or
want to develop them, see the next section.
## Developing
If you wish to work on either plugin, you'll first need [Go](https://www.golang.org)
installed on your machine (whichever version is required by Vault).
Make sure Go is properly installed, including setting up a [GOPATH](https://golang.org/doc/code.html#GOPATH).
### Get Plugin
Clone this repository:
```
mkdir -p $GOPATH/src/github.com/hashicorp
cd $GOPATH/src/github.com/hashicorp
git clone git@github.com:mongodb/vault-plugin-secrets-mongodbatlas.git
cd vault-plugin-secrets-mongodbatlas
go mod download
```
(or use `go get github.com/mongodb/vault-plugin-secrets-mongodbatlas`).
Then you can download any of the required tools to bootstrap your environment:
```sh
$ make bootstrap
```
To compile a development version of these plugins, run `make` or `make dev`.
This will put the plugin binaries in the `bin` and `$GOPATH/bin` folders. `dev`
mode will only generate binaries for your platform and is faster:
```sh
$ make
$ make dev
```
### Install Plugin in Vault
Put the plugin binaries into a location of your choice. This directory
will be specified as the [`plugin_directory`](https://www.vaultproject.io/docs/configuration/index.html#plugin_directory)
in the Vault config used to start the server.
```hcl
plugin_directory = "path/to/plugin/directory"
```
Start a Vault server with this config file:
```sh
$ vault server -config=path/to/config.json ...
```
Once the server is started, register the plugins in the Vault server's [plugin catalog](https://www.vaultproject.io/docs/internals/plugins.html#plugin-catalog):
#### MongoDB Atlas Secrets Engine
To register the MongoDB Atlas Secrets Engine run the following:
```sh
$ vault write sys/plugins/catalog/secret/vault-plugin-secrets-mongodbatlas \
sha_256="$(shasum -a 256 path/to/plugin/directory/vault-plugin-secrets-mongodbatlas | cut -d " " -f1)" \
command="vault-plugin-secrets-mongodbatlas"
```
Any name can be substituted for the plugin name "vault-plugin-secrets-mongodbatlas". This
name will be referenced in the next step, where we enable the secrets
plugin backend using the MongoDB Atlas Secrets Engine:
```sh
$ vault secrets enable --plugin-name='vault-plugin-secrets-mongodbatlas' --path="vault-plugin-secrets-mongodbatlas" plugin
```
#### Database Secrets Engine for MongoDB Atlas plugin
The following steps are required to register the Database Secrets Engine for MongoDB Atlas plugin:
```sh
vault write sys/plugins/catalog/database/mongodbatlas-database-plugin \
sha256=$(shasum -a 256 mongodbatlas-database-plugin | cut -d' ' -f1) \
command="mongodbatlas-database-plugin"
```
Then enable Vault's Database Secrets Engine:
```sh
vault secrets enable database
```
### Tests
These plugins have both integration tests and acceptance tests.
The integration tests are run by `make test`; rather than firing real
API calls, they fire them at a local test server that returns the expected
responses.
The acceptance tests fire real API calls, and are located in `plugins/logical/mongodbatlas/acceptance_test.go`
and `plugins/database/mongodbatlas/mongodbatlas_test.go`. These should be run
once as a final step before opening a PR. Please see `acceptance_test.go` and
`mongodbatlas_test.go` to learn which environment variables need to be set.
**Warning:** The acceptance tests create/destroy/modify *real resources*,
which may incur real costs in some cases. In the presence of a bug,
it is technically possible that broken backends could leave dangling
data behind. Therefore, please run the acceptance tests at your own risk.
At the very least, we recommend running them in their own private
account for whatever backend you're testing.
Before running the acceptance tests, export the following environment variables (a sketch follows the list):
- VAULT_ACC - Set to `1` to run the acceptance tests
- ATLAS_ORGANIZATION_ID - Your Organization ID
- ATLAS_PUBLIC_KEY and ATLAS_PRIVATE_KEY - Your Public and Private key with the correct permissions to run the tests
- ATLAS_PROJECT_ID - Your Project ID
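For example (the values below are placeholders):
```sh
export VAULT_ACC=1
export ATLAS_ORGANIZATION_ID="an-organization-id"
export ATLAS_PUBLIC_KEY="a-public-key"
export ATLAS_PRIVATE_KEY="a-private-key"
export ATLAS_PROJECT_ID="a-project-id"
```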
After exporting the necessary environment variables, run the acceptance tests
from the root of the repository with `VAULT_ACC=1 make test`:
```sh
$ VAULT_ACC=1 make test
```
## Other Docs
**The following will be accurate after review and approval by HashiCorp, which is in progress. Until then, read the docs within this repo for more information.**
See up-to-date **MongoDB Atlas Secrets Engine** [docs](https://www.vaultproject.io/docs/secrets/mongodbatlas/index.html),
**Database Secrets Engine for MongoDB Atlas plugin** [docs](https://www.vaultproject.io/docs/secrets/databases/mongodbatlas.html)
and general [API docs](https://www.vaultproject.io/api/secret/mongodbatlas/index.html).
View File
@@ -0,0 +1,100 @@
package mongodbatlas
import (
"context"
"errors"
"sync"
"github.com/Sectorbob/mlab-ns2/gae/ns/digest"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/mitchellh/mapstructure"
"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)
type mongoDBAtlasConnectionProducer struct {
PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"`
PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"`
ProjectID string `json:"project_id" structs:"project_id" mapstructure:"project_id"`
Initialized bool
RawConfig map[string]interface{}
Type string
client *mongodbatlas.Client
sync.Mutex
}
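// Initialize delegates to Init, discarding the returned configuration.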
func (c *mongoDBAtlasConnectionProducer) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error {
_, err := c.Init(ctx, conf, verifyConnection)
return err
}
// Init parses the connection configuration and marks the producer as initialized.
func (c *mongoDBAtlasConnectionProducer) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (map[string]interface{}, error) {
c.Lock()
defer c.Unlock()
err := mapstructure.WeakDecode(conf, c)
if err != nil {
return nil, err
}
if len(c.PublicKey) == 0 {
return nil, errors.New("public Key is not set")
}
if len(c.PrivateKey) == 0 {
return nil, errors.New("private Key is not set")
}
c.RawConfig = conf
// Set initialized to true at this point since all fields are set,
// and the connection can be established at a later time.
c.Initialized = true
return conf, nil
}
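// secretValues maps sensitive configuration values to redaction placeholders;
// the error sanitizer middleware uses it to scrub the private key from errors.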
func (c *mongoDBAtlasConnectionProducer) secretValues() map[string]interface{} {
return map[string]interface{}{
c.PrivateKey: "[private_key]",
}
}
// Close terminates the database connection.
func (c *mongoDBAtlasConnectionProducer) Close() error {
c.Lock()
defer c.Unlock()
c.client = nil
return nil
}
func (c *mongoDBAtlasConnectionProducer) Connection(_ context.Context) (interface{}, error) {
// This is intentionally not grabbing the lock since the calling functions (e.g. CreateUser)
// are claiming it. (The locking patterns could be refactored to be more consistent/clear.)
if !c.Initialized {
return nil, connutil.ErrNotInitialized
}
if c.client != nil {
return c.client, nil
}
transport := digest.NewTransport(c.PublicKey, c.PrivateKey)
cl, err := transport.Client()
if err != nil {
return nil, err
}
client, err := mongodbatlas.New(cl)
if err != nil {
return nil, err
}
c.client = client
return c.client, nil
}
View File
@@ -0,0 +1,33 @@
#!/usr/bin/env bash
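# Local development helper: builds the plugin, starts a throwaway Vault server in
# Docker with ./bin mounted as the plugin directory, initializes and unseals it,
# then registers the plugin in the catalog and enables the database secrets engine.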
set -ex
make dockerbuild
docker kill vaultplg 2>/dev/null || true
tmpdir=$(mktemp -d vaultplgXXXXXX)
mkdir "$tmpdir/data"
docker run --rm -d -p8200:8200 --name vaultplg -v "$(pwd)/$tmpdir/data":/data -v $(pwd)/bin:/example --cap-add=IPC_LOCK -e 'VAULT_LOCAL_CONFIG=
{
"backend": {"file": {"path": "/data"}},
"listener": [{"tcp": {"address": "0.0.0.0:8200", "tls_disable": true}}],
"plugin_directory": "/example",
"log_level": "debug",
"disable_mlock": true,
"api_addr": "http://localhost:8200"
}
' vault server
sleep 1
export VAULT_ADDR=http://localhost:8200
initoutput=$(vault operator init -key-shares=1 -key-threshold=1 -format=json)
vault operator unseal $(echo "$initoutput" | jq -r .unseal_keys_hex[0])
export VAULT_TOKEN=$(echo "$initoutput" | jq -r .root_token)
vault write sys/plugins/catalog/database/mongodbatlas-database-plugin \
sha256=$(shasum -a 256 bin/vault-plugin-database-mongodbatlas | cut -d' ' -f1) \
command="vault-plugin-database-mongodbatlas"
vault secrets enable database
View File
@@ -0,0 +1,14 @@
module github.com/hashicorp/vault-plugin-database-mongodbatlas
go 1.12
require (
github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a
github.com/go-test/deep v1.0.2 // indirect
github.com/hashicorp/go-version v1.2.0 // indirect
github.com/hashicorp/vault/api v1.0.5-0.20200124174203-d7d4084c8670
github.com/hashicorp/vault/sdk v0.1.14-0.20200124174203-d7d4084c8670
github.com/mitchellh/mapstructure v1.1.2
github.com/mongodb/go-client-mongodb-atlas v0.1.2
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f // indirect
)
View File
@@ -0,0 +1,277 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a h1:KFHLI4QGttB0i7M3qOkAo8Zn/GSsxwwCnInFqBaYtkM=
github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod h1:D73UAuEPckrDorYZdtlCu2ySOLuPB5W4rhIkmmc/XbI=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.10.1 h1:uyt/l0dWjJ879yiAu+T7FG3/6QX+zwm4bQ8P7XsYt3o=
github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88/go.mod h1:Pm+Umb/6Gij6ZG534L7QDyvkauaOQWGb+arj9aFjCE0=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw=
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 h1:PKbxRbsOP7R3f/TpdqcgXrO69T3yd9nLoR+RMRUxSxA=
github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo=
github.com/hashicorp/vault/api v1.0.5-0.20200124174203-d7d4084c8670 h1:gd2r0gcPNXWKZ9ROsJcdji/ZM/n49FN0ZjRxz/ctTGQ=
github.com/hashicorp/vault/api v1.0.5-0.20200124174203-d7d4084c8670/go.mod h1:uNC5C562jnrWougFU1BtBR1YtW3dWUrOnad/omSTSAs=
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
github.com/hashicorp/vault/sdk v0.1.14-0.20200111013952-157e805b97be h1:7sRI8Qk4f3RfWCBr6MBd5qUYlQVEumb0iAF2uTVuWpQ=
github.com/hashicorp/vault/sdk v0.1.14-0.20200111013952-157e805b97be/go.mod h1:PuQJXv7VkYB+5k83Ov/3+Z4ta3kwGfHO0CcUh7DHJgk=
github.com/hashicorp/vault/sdk v0.1.14-0.20200124174203-d7d4084c8670 h1:fHIWysbdn+hKlzQUiy9E3uRurPDiFW/KftXRwqH14Uc=
github.com/hashicorp/vault/sdk v0.1.14-0.20200124174203-d7d4084c8670/go.mod h1:DCTUzAV0pBLCLfXghEBDcGAw2pZaLvdtRphlvjxvPwo=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mongodb/go-client-mongodb-atlas v0.1.2 h1:qmUme1TlQBPZupmXMnpD8DxnfGXLVGs3w+0Z17HBiSA=
github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF10yY3tJM+1xATXMJ3oU35LU=
github.com/mwielbut/pointy v1.1.0 h1:U5/YEfoIkaGCHv0St3CgjduqXID4FNRoyZgLM1kY9vg=
github.com/mwielbut/pointy v1.1.0/go.mod h1:MvvO+uMFj9T5DMda33HlvogsFBX7pWWKAkFIn4teYwY=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
View File
@@ -0,0 +1,200 @@
package mongodbatlas
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/database/dbplugin"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)
const mongoDBAtlasTypeName = "mongodbatlas"
// Verify interface is implemented
var _ dbplugin.Database = &MongoDBAtlas{}
type MongoDBAtlas struct {
*mongoDBAtlasConnectionProducer
credsutil.CredentialsProducer
}
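// New returns a MongoDBAtlas instance wrapped in middleware that sanitizes
// secret values (such as the private key) out of returned errors.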
func New() (interface{}, error) {
db := new()
dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues)
return dbType, nil
}
func new() *MongoDBAtlas {
connProducer := &mongoDBAtlasConnectionProducer{}
connProducer.Type = mongoDBAtlasTypeName
credsProducer := &credsutil.SQLCredentialsProducer{
DisplayNameLen: 15,
RoleNameLen: 15,
UsernameLen: 100,
Separator: "-",
}
return &MongoDBAtlas{
mongoDBAtlasConnectionProducer: connProducer,
CredentialsProducer: credsProducer,
}
}
// Run instantiates a MongoDBAtlas object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
func (m *MongoDBAtlas) CreateUser(ctx context.Context, statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
// Grab the lock
m.Lock()
defer m.Unlock()
statements = dbutil.StatementCompatibilityHelper(statements)
if len(statements.Creation) == 0 {
return "", "", dbutil.ErrEmptyCreationStatement
}
client, err := m.getConnection(ctx)
if err != nil {
return "", "", err
}
username, err = m.GenerateUsername(usernameConfig)
if err != nil {
return "", "", err
}
password, err = m.GeneratePassword()
if err != nil {
return "", "", err
}
// Unmarshal the first creation statement into a mongoDBAtlasStatement
var databaseUser mongoDBAtlasStatement
err = json.Unmarshal([]byte(statements.Creation[0]), &databaseUser)
if err != nil {
return "", "", fmt.Errorf("Error unmarshalling statement %s", err)
}
// Default to "admin" if no db provided
if databaseUser.DatabaseName == "" {
databaseUser.DatabaseName = "admin"
}
if len(databaseUser.Roles) == 0 {
return "", "", fmt.Errorf("roles array is required in creation statement")
}
databaseUserRequest := &mongodbatlas.DatabaseUser{
Username: username,
Password: password,
DatabaseName: databaseUser.DatabaseName,
Roles: databaseUser.Roles,
}
_, _, err = client.DatabaseUsers.Create(ctx, m.ProjectID, databaseUserRequest)
if err != nil {
return "", "", err
}
return username, password, nil
}
// RenewUser is not supported on MongoDB, so this is a no-op.
func (m *MongoDBAtlas) RenewUser(ctx context.Context, statements dbplugin.Statements, username string, expiration time.Time) error {
// NOOP
return nil
}
// RevokeUser drops the specified user from the authentication database. If none is provided
// in the revocation statement, the default "admin" authentication database will be assumed.
func (m *MongoDBAtlas) RevokeUser(ctx context.Context, statements dbplugin.Statements, username string) error {
m.Lock()
defer m.Unlock()
statements = dbutil.StatementCompatibilityHelper(statements)
client, err := m.getConnection(ctx)
if err != nil {
return err
}
_, err = client.DatabaseUsers.Delete(ctx, m.ProjectID, username)
return err
}
// SetCredentials uses provided information to set/create a user in the
// database. Unlike CreateUser, this method requires a username be provided and
// uses the name given, instead of generating a name. This is used for creating
// and setting the password of static accounts, as well as rolling back
// passwords in the database in the event an updated database fails to save in
// Vault's storage.
func (m *MongoDBAtlas) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) {
// Grab the lock
m.Lock()
defer m.Unlock()
statements = dbutil.StatementCompatibilityHelper(statements)
if len(statements.Creation) == 0 {
return "", "", dbutil.ErrEmptyCreationStatement
}
client, err := m.getConnection(ctx)
if err != nil {
return "", "", err
}
username = staticUser.Username
password = staticUser.Password
databaseUserRequest := &mongodbatlas.DatabaseUser{
Password: password,
}
_, _, err = client.DatabaseUsers.Update(context.Background(), m.ProjectID, username, databaseUserRequest)
if err != nil {
return "", "", err
}
return username, password, nil
}
func (m *MongoDBAtlas) getConnection(ctx context.Context) (*mongodbatlas.Client, error) {
client, err := m.Connection(ctx)
if err != nil {
return nil, err
}
return client.(*mongodbatlas.Client), nil
}
// RotateRootCredentials is not currently supported on MongoDB
func (m *MongoDBAtlas) RotateRootCredentials(ctx context.Context, statements []string) (map[string]interface{}, error) {
return nil, errors.New("root credential rotation is not currently implemented in this database secrets engine")
}
// Type returns the TypeName for this backend
func (m *MongoDBAtlas) Type() (string, error) {
return mongoDBAtlasTypeName, nil
}
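// mongoDBAtlasStatement is the expected shape of a role's creation statement,
// e.g. {"database_name": "admin", "roles": [{"databaseName": "admin", "roleName": "readWrite"}]}
// (role field names per the Atlas client's Role type).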
type mongoDBAtlasStatement struct {
DatabaseName string `json:"database_name"`
Roles []mongodbatlas.Role `json:"roles,omitempty"`
}
View File
@@ -0,0 +1,53 @@
# Created by https://www.gitignore.io/api/go,macos
# Edit at https://www.gitignore.io/?templates=go,macos
### Go ###
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
pkg*
bin*
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
### vsCode ###
.vscode
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
.idea/
# End of https://www.gitignore.io/api/go,macos

View File

@@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

View File

@@ -0,0 +1,51 @@
TOOL?=vault-plugin-secrets-mongodbatlas
TEST?=$$(go list ./... | grep -v /vendor/ | grep -v teamcity)
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
EXTERNAL_TOOLS=\
github.com/mitchellh/gox \
github.com/golang/dep/cmd/dep
BUILD_TAGS?=${TOOL}
GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
# bin generates the releasable binaries for this plugin
bin: fmtcheck generate
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
default: dev
# dev creates binaries for testing Vault locally. These are put
# into ./bin/ as well as $GOPATH/bin.
dev: fmtcheck generate
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
# test runs the unit tests and vets the code
test: fmtcheck generate
CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -v -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -count=1 -timeout=20m -parallel=4
testcompile: fmtcheck generate
@for pkg in $(TEST) ; do \
go test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
done
# generate runs `go generate` to build the dynamically generated
# source files.
generate:
go generate $$(go list ./... | grep -v /vendor/)
# bootstrap the build by downloading additional tools
bootstrap:
@for tool in $(EXTERNAL_TOOLS) ; do \
echo "Installing/Updating $$tool" ; \
go get -u $$tool; \
done
fmtcheck:
@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
fmt:
gofmt -w $(GOFMT_FILES)
proto:
protoc *.proto --go_out=plugins=grpc:.
.PHONY: bin default generate test vet bootstrap fmt fmtcheck

View File

@@ -0,0 +1,225 @@
# Vault Plugins: MongoDB Atlas Secrets Engine and Database Secrets Engine for MongoDB Atlas plugin
**IMPORTANT: This plugin is currently under development. Feel free to test it by following the instructions in the Developing section below; however, consider it beta until it is verified by HashiCorp. Once verified and released, versions will be documented in a CHANGELOG.**
This repository contains two Secrets Engines specific to MongoDB Atlas for use with [HashiCorp Vault](https://github.com/hashicorp/vault).
The first is the MongoDB Atlas Secrets Engine which generates unique, ephemeral [Programmatic API](https://docs.atlas.mongodb.com/reference/api/apiKeys/) keys for MongoDB Atlas.
The second is an extension of the existing Database Secrets Engine that generates unique, ephemeral
MongoDB [Database User](https://docs.atlas.mongodb.com/reference/api/database-users/) credentials in MongoDB Atlas; we therefore refer to it as the Database Secrets
Engine for MongoDB Atlas.
The plugins are located in the following directories:
- **MongoDB Atlas Secrets Engine:** `plugins/logical/mongodbatlas/`
- **Database Secrets Engine for MongoDB Atlas plugin:** `plugins/database/mongodbatlas`
**Please note**: HashiCorp takes Vault's security and its users' trust very seriously, as does MongoDB.
If you believe you have found a security issue in Vault or with this plugin, _please responsibly disclose_ by
contacting us at [security@hashicorp.com](mailto:security@hashicorp.com) and contacting MongoDB
directly via [security@mongodb.com](mailto:security@mongodb.com) or by
[opening a ticket](https://jira.mongodb.org/plugins/servlet/samlsso?redirectTo=%2Fbrowse%2FSECURITY) (link is external).
## Quick Links
- [Vault Website](https://www.vaultproject.io)
- [MongoDB Atlas Website](https://www.mongodb.com/cloud/atlas)
- [MongoDB Atlas Secrets Engine Docs](https://www.vaultproject.io/docs/secrets/mongodbatlas/index.html)
- [Database Secrets Engine for MongoDB Atlas](https://www.vaultproject.io/docs/secrets/databases/mongodbatlas.html)
- [Vault Github](https://www.github.com/hashicorp/vault)
- [Vault General Announcement List](https://groups.google.com/forum/#!forum/hashicorp-announce)
- [Vault Discussion List](https://groups.google.com/forum/#!forum/vault-tool)
## Usage
**The following will be accurate after review and approval by HashiCorp, which is in progress. Until then, follow the instructions in the Developing section below:**
These are [Vault-specific plugins (also known as Secrets Engines/Backends)](https://www.vaultproject.io/docs/internals/plugins.html). This guide assumes you have already installed Vault
and have a basic understanding of how Vault works. Otherwise, first read this guide on
how to [get started with Vault](https://www.vaultproject.io/intro/getting-started/install.html).
If you are using Vault 11.0.1 or above, both plugins are packaged with Vault. The MongoDB Atlas Secrets Engine can be enabled by running:
```sh
$ vault secrets enable mongodbatlas
Success! Enabled the mongodbatlas secrets engine at: mongodbatlas/
```
Then write the configuration and lease for the plugin. For example:
```sh
vault write mongodbatlas/config \
public_key="a-public-key" \
private_key="a-private-key"
vault write mongodbatlas/config/lease \
ttl=300 \
max_ttl=4800
```
The Database Secrets Engine for MongoDB Atlas can be enabled by running:
```sh
$ vault secrets enable database
Success! Enabled the database secrets engine at: database/
```
Then write the configuration for the plugin. For example:
```sh
$ vault write database/config/my-mongodbatlas-database \
plugin_name=mongodbatlas-database-plugin \
allowed_roles="my-role" \
public_key="a-public-key" \
private_key="a-private-key!" \
project_id="a-project-id"
```
If you are testing this plugin in an earlier version of Vault or
want to develop, see the next section.
## Developing
If you wish to work on either plugin, you'll first need [Go](https://www.golang.org)
installed on your machine (whichever version is required by Vault).
Make sure Go is properly installed, including setting up a [GOPATH](https://golang.org/doc/code.html#GOPATH).
### Get Plugin
Clone this repository:
```
mkdir $GOPATH/src/github.com/hashicorp/vault-plugin-secrets-mongodbatlas
cd $GOPATH/src/github.com/hashicorp/
git clone git@github.com:mongodb/vault-plugin-secrets-mongodbatlas.git
go mod download
```
(or use `go get github.com/mongodb/vault-plugin-secrets-mongodbatlas`).
Then you can download any of the required tools to bootstrap your environment:
```sh
$ make bootstrap
```
To compile a development version of these plugins, run `make` or `make dev`.
This will put the plugin binaries in the `bin` and `$GOPATH/bin` folders. `dev`
mode will only generate binaries for your platform and is faster:
```sh
$ make
$ make dev
```
### Install Plugin in Vault
Put the plugin binaries into a location of your choice. This directory
will be specified as the [`plugin_directory`](https://www.vaultproject.io/docs/configuration/index.html#plugin_directory)
in the Vault config used to start the server.
```hcl
plugin_directory = "path/to/plugin/directory"
```
Start a Vault server with this config file:
```sh
$ vault server -config=path/to/config.json ...
```
Once the server is started, register the plugins in the Vault server's [plugin catalog](https://www.vaultproject.io/docs/internals/plugins.html#plugin-catalog):
#### MongoDB Atlas Secrets Engine
To register the MongoDB Atlas Secrets Engine run the following:
```sh
$ vault write sys/plugins/catalog/vault-plugin-secrets-mongodbatlas \
sha_256="$(shasum -a 256 path/to/plugin/directory/vault-plugin-secrets-mongodbatlas | cut -d " " -f1)" \
command="vault-plugin-secrets-mongodbatlas"
```
Any name can be substituted for the plugin name "vault-plugin-secrets-mongodbatlas". This
name will be referenced in the next step, where we enable the secrets
plugin backend using the MongoDB Atlas Secrets Engine:
```sh
$ vault secrets enable --plugin-name='vault-plugin-secrets-mongodbatlas' --path="vault-plugin-secrets-mongodbatlas" plugin
```
#### Database Secrets Engine for MongoDB Atlas plugin
The following steps are required to register the Database Secrets Engine for MongoDB Atlas plugin:
```sh
vault write sys/plugins/catalog/database/mongodbatlas-database-plugin \
sha256=$(shasum -a 256 mongodbatlas-database-plugin | cut -d' ' -f1) \
command="mongodbatlas-database-plugin"
```
Then, you must enable Vault's Database Secrets Engine:
```sh
vault secrets enable database
```
### Tests
This plugin has both integration tests and acceptance tests.
The integration tests are run by `make test`; rather than calling the real
Atlas API, they fire API calls at a local test server that returns expected
responses.
The acceptance tests fire real API calls and are located in `plugins/logical/mongodbatlas/acceptance_test.go`
and `plugins/database/mongodbatlas/mongodbatlas_test.go`. These should be run
once as a final step before opening a PR. Please see `acceptance_test.go` and
`mongodbatlas_test.go` to learn which environment variables need to be set.
**Warning:** The acceptance tests create/destroy/modify *real resources*,
which may incur real costs in some cases. In the presence of a bug,
it is technically possible that broken backends could leave dangling
data behind. Therefore, please run the acceptance tests at your own risk.
At the very least, we recommend running them in their own private
account for whatever backend you're testing.
Before running the acceptance tests, export the following environment variables:
- `VAULT_ACC` - Set to `1` to run the acceptance tests
- `ATLAS_ORGANIZATION_ID` - Your Organization ID
- `ATLAS_PUBLIC_KEY` and `ATLAS_PRIVATE_KEY` - Your public and private key with the correct permissions to run the tests
- `ATLAS_PROJECT_ID` - Your Project ID
After exporting the necessary environment variables, run the acceptance tests
from the repository root with `VAULT_ACC=1 make test`:
```sh
$ VAULT_ACC=1 make test
```
## Other Docs
**The following will be accurate after review and approval by HashiCorp, which is in progress. Until then, read the docs within this repo for more information.**
See up-to-date **MongoDB Atlas Secrets Engine** [docs](https://www.vaultproject.io/docs/secrets/mongodbatlas/index.html),
**Database Secrets Engine for MongoDB Atlas plugin** [docs](https://www.vaultproject.io/docs/secrets/databases/mongodbatlas.html)
and general [API docs](https://www.vaultproject.io/api/secret/mongodbatlas/index.html).

View File

@@ -0,0 +1,76 @@
package mongodbatlas
import (
"context"
"strings"
"sync"
"time"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)
func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
b := NewBackend(conf.System)
if err := b.Setup(ctx, conf); err != nil {
return nil, err
}
return b, nil
}
func NewBackend(system logical.SystemView) *Backend {
var b Backend
b.Backend = &framework.Backend{
Help: strings.TrimSpace(backendHelp),
PathsSpecial: &logical.Paths{
LocalStorage: []string{
framework.WALPrefix,
},
SealWrapStorage: []string{
"config",
},
},
Paths: []*framework.Path{
b.pathRolesList(),
b.pathRoles(),
b.pathConfig(),
b.pathCredentials(),
},
Secrets: []*framework.Secret{
b.programmaticAPIKeys(),
},
WALRollback: b.pathProgrammaticAPIKeyRollback,
WALRollbackMinAge: minUserRollbackAge,
BackendType: logical.TypeLogical,
}
b.system = system
return &b
}
type Backend struct {
*framework.Backend
credentialMutex sync.RWMutex
clientMutex sync.RWMutex
client *mongodbatlas.Client
system logical.SystemView
}
const backendHelp = `
The MongoDB Atlas backend dynamically generates API keys for a set of
Organization or Project roles. The API keys have a configurable lease
set and are automatically revoked at the end of the lease.
After mounting this backend, the Public and Private keys to generate
API keys must be configured with the "config" path and roles must be
written using the "roles/" endpoints before any API keys can be generated.
`
const minUserRollbackAge = 5 * time.Minute
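For context, a standalone build of this backend is typically served from a small `main` package that hands the `Factory` above to Vault's plugin machinery. The sketch below is an assumption, not the repository's actual `main.go`: the import path is hypothetical, and the logger setup is just one reasonable choice.

```go
package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/sdk/plugin"

	// Hypothetical import path for the backend package defined above.
	mongodbatlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas"
)

func main() {
	// Parse the standard plugin TLS flags (-ca-cert, -client-cert, ...).
	apiClientMeta := &api.PluginAPIClientMeta{}
	flags := apiClientMeta.FlagSet()
	flags.Parse(os.Args[1:])

	// Serve the backend over Vault's plugin protocol.
	if err := plugin.Serve(&plugin.ServeOpts{
		BackendFactoryFunc: mongodbatlas.Factory,
		TLSProviderFunc:    api.VaultPluginTLSProvider(apiClientMeta.GetTLSConfig()),
	}); err != nil {
		hclog.New(&hclog.LoggerOptions{}).Error("plugin shutting down", "error", err)
		os.Exit(1)
	}
}
```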

View File

@@ -0,0 +1,66 @@
package mongodbatlas
import (
"context"
"errors"
"github.com/Sectorbob/mlab-ns2/gae/ns/digest"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/sdk/logical"
"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)
func (b *Backend) clientMongo(ctx context.Context, s logical.Storage) (*mongodbatlas.Client, error) {
b.clientMutex.Lock()
defer b.clientMutex.Unlock()
// if the client is already created, just return it
if b.client != nil {
return b.client, nil
}
client, err := nonCachedClient(ctx, s)
if err != nil {
return nil, err
}
b.client = client
return b.client, nil
}
func nonCachedClient(ctx context.Context, s logical.Storage) (*mongodbatlas.Client, error) {
config, err := getRootConfig(ctx, s)
if err != nil {
return nil, err
}
transport := digest.NewTransport(config.PublicKey, config.PrivateKey)
client, err := transport.Client()
if err != nil {
return nil, err
}
return mongodbatlas.NewClient(client), nil
}
func getRootConfig(ctx context.Context, s logical.Storage) (*config, error) {
entry, err := s.Get(ctx, "config")
if err != nil {
return nil, err
}
if entry != nil {
var config config
if err := entry.DecodeJSON(&config); err != nil {
return nil, errwrap.Wrapf("error reading root configuration: {{err}}", err)
}
// return the config, we are done
return &config, nil
}
return nil, errors.New("empty config entry")
}
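As a rough usage sketch outside the plugin, the same digest transport can be used to build a standalone Atlas client from a public/private key pair. The key values and project ID below are placeholders, and `DatabaseUsers.List` is assumed to exist with this signature in the v0.1.2 client.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Sectorbob/mlab-ns2/gae/ns/digest"
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

func main() {
	// Placeholder credentials and project ID; supply real values in practice.
	transport := digest.NewTransport("a-public-key", "a-private-key")
	httpClient, err := transport.Client()
	if err != nil {
		log.Fatal(err)
	}

	atlas := mongodbatlas.NewClient(httpClient)
	users, _, err := atlas.DatabaseUsers.List(context.Background(), "a-project-id", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("database users:", len(users))
}
```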

View File

@@ -0,0 +1,41 @@
#!/usr/bin/env bash
set -ex
make dockerbuild
docker kill vaultplg 2>/dev/null || true
tmpdir=$(mktemp -d vaultplgXXXXXX)
mkdir "$tmpdir/data"
docker run --rm -d -p8200:8200 --name vaultplg -v "$(pwd)/$tmpdir/data":/data -v $(pwd)/bin:/example --cap-add=IPC_LOCK -e 'VAULT_LOCAL_CONFIG=
{
"backend": {"file": {"path": "/data"}},
"listener": [{"tcp": {"address": "0.0.0.0:8200", "tls_disable": true}}],
"plugin_directory": "/example",
"log_level": "debug",
"disable_mlock": true,
"api_addr": "http://localhost:8200"
}
' vault server
sleep 1
export VAULT_ADDR=http://localhost:8200
initoutput=$(vault operator init -key-shares=1 -key-threshold=1 -format=json)
vault operator unseal $(echo "$initoutput" | jq -r .unseal_keys_hex[0])
export VAULT_TOKEN=$(echo "$initoutput" | jq -r .root_token)
vault write sys/plugins/catalog/secret/vault-plugin-secrets-mongodbatlas \
sha_256=$(shasum -a 256 bin/vault-plugin-secrets-mongodbatlas | cut -d' ' -f1) \
command="vault-plugin-secrets-mongodbatlas"
vault secrets enable \
-path="mongodbatlas" \
-plugin-name="vault-plugin-secrets-mongodbatlas" plugin
vault write sys/plugins/catalog/database/mongodbatlas-database-plugin \
sha256=$(shasum -a 256 bin/mongodbatlas-database-plugin | cut -d' ' -f1) \
command="mongodbatlas-database-plugin"
vault secrets enable database

View File

@@ -0,0 +1,17 @@
module github.com/hashicorp/vault-plugin-secrets-mongodbatlas
go 1.12
require (
github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a
github.com/armon/go-radix v1.0.0 // indirect
github.com/go-test/deep v1.0.2
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-hclog v0.12.0
github.com/hashicorp/go-version v1.2.0 // indirect
github.com/hashicorp/vault/api v1.0.5-0.20200124174203-d7d4084c8670
github.com/hashicorp/vault/sdk v0.1.14-0.20200124174203-d7d4084c8670
github.com/mitchellh/mapstructure v1.1.2
github.com/mongodb/go-client-mongodb-atlas v0.1.2
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f // indirect
)

View File

@@ -0,0 +1,277 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a h1:KFHLI4QGttB0i7M3qOkAo8Zn/GSsxwwCnInFqBaYtkM=
github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod h1:D73UAuEPckrDorYZdtlCu2ySOLuPB5W4rhIkmmc/XbI=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.10.1 h1:uyt/l0dWjJ879yiAu+T7FG3/6QX+zwm4bQ8P7XsYt3o=
github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88 h1:YMLQiDrLQT1QEollUbw+yxpW0yJiKVHDGmMpBISeACA=
github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88/go.mod h1:Pm+Umb/6Gij6ZG534L7QDyvkauaOQWGb+arj9aFjCE0=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw=
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo=
github.com/hashicorp/vault/api v1.0.5-0.20200124174203-d7d4084c8670 h1:gd2r0gcPNXWKZ9ROsJcdji/ZM/n49FN0ZjRxz/ctTGQ=
github.com/hashicorp/vault/api v1.0.5-0.20200124174203-d7d4084c8670/go.mod h1:uNC5C562jnrWougFU1BtBR1YtW3dWUrOnad/omSTSAs=
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
github.com/hashicorp/vault/sdk v0.1.14-0.20200111013952-157e805b97be h1:7sRI8Qk4f3RfWCBr6MBd5qUYlQVEumb0iAF2uTVuWpQ=
github.com/hashicorp/vault/sdk v0.1.14-0.20200111013952-157e805b97be/go.mod h1:PuQJXv7VkYB+5k83Ov/3+Z4ta3kwGfHO0CcUh7DHJgk=
github.com/hashicorp/vault/sdk v0.1.14-0.20200124174203-d7d4084c8670 h1:fHIWysbdn+hKlzQUiy9E3uRurPDiFW/KftXRwqH14Uc=
github.com/hashicorp/vault/sdk v0.1.14-0.20200124174203-d7d4084c8670/go.mod h1:DCTUzAV0pBLCLfXghEBDcGAw2pZaLvdtRphlvjxvPwo=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mongodb/go-client-mongodb-atlas v0.1.2 h1:qmUme1TlQBPZupmXMnpD8DxnfGXLVGs3w+0Z17HBiSA=
github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF10yY3tJM+1xATXMJ3oU35LU=
github.com/mwielbut/pointy v1.1.0 h1:U5/YEfoIkaGCHv0St3CgjduqXID4FNRoyZgLM1kY9vg=
github.com/mwielbut/pointy v1.1.0/go.mod h1:MvvO+uMFj9T5DMda33HlvogsFBX7pWWKAkFIn4teYwY=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -0,0 +1,93 @@
package mongodbatlas
import (
"context"
"errors"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
func (b *Backend) pathConfig() *framework.Path {
return &framework.Path{
Pattern: "config",
Fields: map[string]*framework.FieldSchema{
"public_key": {
Type: framework.TypeString,
Description: "MongoDB Atlas Programmatic Public Key",
Required: true,
},
"private_key": {
Type: framework.TypeString,
Description: "MongoDB Atlas Programmatic Private Key",
Required: true,
DisplayAttrs: &framework.DisplayAttributes{
Sensitive: true,
},
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathConfigWrite,
logical.ReadOperation: b.pathConfigRead,
},
HelpSynopsis: pathConfigHelpSyn,
HelpDescription: pathConfigHelpDesc,
}
}
func (b *Backend) pathConfigWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
publicKey := data.Get("public_key").(string)
if publicKey == "" {
return nil, errors.New("public_key is empty")
}
privateKey := data.Get("private_key").(string)
if privateKey == "" {
return nil, errors.New("private_key is empty")
}
entry, err := logical.StorageEntryJSON("config", config{
PublicKey: publicKey,
PrivateKey: privateKey,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(ctx, entry); err != nil {
return nil, err
}
// Clean cached client (if any)
b.client = nil
return nil, nil
}
func (b *Backend) pathConfigRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
cfg, err := getRootConfig(ctx, req.Storage)
if err != nil {
return nil, err
}
return &logical.Response{
Data: map[string]interface{}{
"public_key": cfg.PublicKey,
},
}, nil
}
type config struct {
PrivateKey string `json:"private_key"`
PublicKey string `json:"public_key"`
}
const pathConfigHelpSyn = `
Configure the credentials that are used to manage Database Users.
`
const pathConfigHelpDesc = `
Before doing anything, the Atlas backend needs credentials that are able
to manage database users, access keys, etc. This endpoint is used to
configure those credentials.
`
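For orientation, a minimal sketch of configuring this backend through the official Vault Go API client. It assumes the engine is mounted at "mongodbatlas/" and that the key values shown are placeholders; none of this is part of the change itself.

package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Store the Atlas programmatic key pair handled by pathConfigWrite above.
	if _, err := client.Logical().Write("mongodbatlas/config", map[string]interface{}{
		"public_key":  "example-public-key",  // placeholder
		"private_key": "example-private-key", // placeholder
	}); err != nil {
		log.Fatal(err)
	}
}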

View File

@@ -0,0 +1,81 @@
package mongodbatlas
import (
"context"
"errors"
"fmt"
"regexp"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/base62"
"github.com/hashicorp/vault/sdk/logical"
)
var displayNameRegex = regexp.MustCompile("[^a-zA-Z0-9+=,.@_-]")
func (b *Backend) pathCredentials() *framework.Path {
return &framework.Path{
Pattern: "creds/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": {
Type: framework.TypeLowerCaseString,
Description: "Name of the role",
Required: true,
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathCredentialsRead,
logical.UpdateOperation: b.pathCredentialsRead,
},
HelpSynopsis: pathCredentialsHelpSyn,
HelpDescription: pathCredentialsHelpDesc,
}
}
func (b *Backend) pathCredentialsRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
userName := d.Get("name").(string)
cred, err := b.credentialRead(ctx, req.Storage, userName)
if err != nil {
return nil, errwrap.Wrapf("error retrieving credential: {{err}}", err)
}
if cred == nil {
return nil, errors.New("error retrieving credential: credential is nil")
}
return b.programmaticAPIKeyCreate(ctx, req.Storage, userName, cred)
}
type walEntry struct {
UserName string
ProjectID string
OrganizationID string
ProgrammaticAPIKeyID string
}
func genUsername(displayName string) (string, error) {
midString := displayNameRegex.ReplaceAllString(displayName, "_")
id, err := base62.Random(20)
if err != nil {
return "", err
}
ret := fmt.Sprintf("vault-%s-%s", midString, id)
return ret, nil
}
const pathCredentialsHelpSyn = `
Generate MongoDB Atlas Programmatic API Keys from a specific Vault role.
`
const pathCredentialsHelpDesc = `
This path generates MongoDB Atlas Programmatic API Keys for
a particular role. Atlas Programmatic API Keys will be
generated on demand and will be automatically revoked when
the lease is up.
`
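A sketch of consuming the creds endpoint defined above with the Vault Go API client. The mount path and role name are assumptions for illustration only.

package example

import (
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

// readProgrammaticAPIKey reads creds/<role>; the response data carries
// "public_key", "private_key" and "description", and the lease follows the
// role's ttl/max_ttl settings.
func readProgrammaticAPIKey(client *vault.Client, role string) error {
	secret, err := client.Logical().Read("mongodbatlas/creds/" + role)
	if err != nil {
		return err
	}
	if secret == nil {
		return fmt.Errorf("no credential returned for role %q", role)
	}
	fmt.Printf("lease_id=%s public_key=%v\n", secret.LeaseID, secret.Data["public_key"])
	return nil
}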

View File

@@ -0,0 +1,260 @@
package mongodbatlas
import (
"context"
"fmt"
"time"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
func (b *Backend) pathRoles() *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": {
Type: framework.TypeLowerCaseString,
Description: "Name of the Roles",
Required: true,
},
"project_id": {
Type: framework.TypeString,
Description: fmt.Sprintf("Project ID the %s API key belongs to.", projectProgrammaticAPIKey),
},
"roles": {
Type: framework.TypeCommaStringSlice,
Description: fmt.Sprintf("List of roles that the API Key should be granted. A minimum of one role must be provided. Any roles provided must be valid for the assigned Project, required for %s and %s keys.", orgProgrammaticAPIKey, projectProgrammaticAPIKey),
Required: true,
},
"organization_id": {
Type: framework.TypeString,
Description: fmt.Sprintf("Organization ID required for an %s API key", orgProgrammaticAPIKey),
},
"ip_addresses": {
Type: framework.TypeCommaStringSlice,
Description: fmt.Sprintf("IP address to be added to the whitelist for the API key. Optional for %s and %s keys.", orgProgrammaticAPIKey, projectProgrammaticAPIKey),
},
"cidr_blocks": {
Type: framework.TypeCommaStringSlice,
Description: fmt.Sprintf("Whitelist entry in CIDR notation to be added for the API key. Optional for %s and %s keys.", orgProgrammaticAPIKey, projectProgrammaticAPIKey),
},
"project_roles": {
Type: framework.TypeCommaStringSlice,
Description: fmt.Sprintf("Roles assigned when an %s API Key is assigned to a %s API key", orgProgrammaticAPIKey, projectProgrammaticAPIKey),
},
"ttl": {
Type: framework.TypeDurationSecond,
Description: `Duration in seconds after which the issued credential should expire. Defaults to 0, in which case the value will fallback to the system/mount defaults.`,
},
"max_ttl": {
Type: framework.TypeDurationSecond,
Description: "The maximum allowed lifetime of credentials issued using this role.",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.DeleteOperation: b.pathRolesDelete,
logical.ReadOperation: b.pathRolesRead,
logical.UpdateOperation: b.pathRolesWrite,
},
HelpSynopsis: pathRolesHelpSyn,
HelpDescription: pathRolesHelpDesc,
}
}
func (b *Backend) pathRolesDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete(ctx, "roles/"+d.Get("name").(string))
return nil, err
}
func (b *Backend) pathRolesRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entry, err := b.credentialRead(ctx, req.Storage, d.Get("name").(string))
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
return &logical.Response{
Data: entry.toResponseData(),
}, nil
}
func (b *Backend) pathRolesWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
var resp logical.Response
credentialName := d.Get("name").(string)
if credentialName == "" {
return logical.ErrorResponse("missing role name"), nil
}
b.credentialMutex.Lock()
defer b.credentialMutex.Unlock()
credentialEntry, err := b.credentialRead(ctx, req.Storage, credentialName)
if err != nil {
return nil, err
}
if credentialEntry == nil {
credentialEntry = &atlasCredentialEntry{}
}
if organizationIDRaw, ok := d.GetOk("organization_id"); ok {
credentialEntry.OrganizationID = organizationIDRaw.(string)
}
getAPIWhitelistArgs(credentialEntry, d)
if projectIDRaw, ok := d.GetOk("project_id"); ok {
projectID := projectIDRaw.(string)
credentialEntry.ProjectID = projectID
}
if len(credentialEntry.OrganizationID) == 0 && len(credentialEntry.ProjectID) == 0 {
return logical.ErrorResponse("organization_id or project_id are required"), nil
}
if programmaticKeyRolesRaw, ok := d.GetOk("roles"); ok {
credentialEntry.Roles = programmaticKeyRolesRaw.([]string)
} else {
return logical.ErrorResponse("%s is required for %s and %s keys", "roles", orgProgrammaticAPIKey, projectProgrammaticAPIKey), nil
}
if projectRolesRaw, ok := d.GetOk("project_roles"); ok {
credentialEntry.ProjectRoles = projectRolesRaw.([]string)
} else {
if isAssignedToProject(credentialEntry.OrganizationID, credentialEntry.ProjectID) {
return logical.ErrorResponse("%s is required if both %s and %s are supplied", "roles", "organization_id", "project_id"), nil
}
}
if ttlRaw, ok := d.GetOk("ttl"); ok {
credentialEntry.TTL = time.Duration(ttlRaw.(int)) * time.Second
}
if maxttlRaw, ok := d.GetOk("max_ttl"); ok {
credentialEntry.MaxTTL = time.Duration(maxttlRaw.(int)) * time.Second
}
if credentialEntry.MaxTTL > 0 && credentialEntry.TTL > credentialEntry.MaxTTL {
return logical.ErrorResponse("ttl exceeds max_ttl"), nil
}
if err := setAtlasCredential(ctx, req.Storage, credentialName, credentialEntry); err != nil {
return nil, err
}
return &resp, nil
}
func getAPIWhitelistArgs(credentialEntry *atlasCredentialEntry, d *framework.FieldData) {
if cidrBlocks, ok := d.GetOk("cidr_blocks"); ok {
credentialEntry.CIDRBlocks = cidrBlocks.([]string)
}
if addresses, ok := d.GetOk("ip_addresses"); ok {
credentialEntry.IPAddresses = addresses.([]string)
}
}
func setAtlasCredential(ctx context.Context, s logical.Storage, credentialName string, credentialEntry *atlasCredentialEntry) error {
if credentialName == "" {
return fmt.Errorf("empty role name")
}
if credentialEntry == nil {
return fmt.Errorf("emtpy credentialEntry")
}
entry, err := logical.StorageEntryJSON("roles/"+credentialName, credentialEntry)
if err != nil {
return err
}
if entry == nil {
return fmt.Errorf("nil result when writing to storage")
}
if err := s.Put(ctx, entry); err != nil {
return err
}
return nil
}
func (b *Backend) credentialRead(ctx context.Context, s logical.Storage, credentialName string) (*atlasCredentialEntry, error) {
if credentialName == "" {
return nil, fmt.Errorf("missing credential name")
}
entry, err := s.Get(ctx, "roles/"+credentialName)
if err != nil {
return nil, err
}
var credentialEntry atlasCredentialEntry
if entry != nil {
if err := entry.DecodeJSON(&credentialEntry); err != nil {
return nil, err
}
return &credentialEntry, nil
}
// Return nil here because all callers expect that if an entry
// is nil, the method will return nil, nil.
return nil, nil
}
type atlasCredentialEntry struct {
ProjectID string `json:"project_id"`
DatabaseName string `json:"database_name"`
Roles []string `json:"roles"`
OrganizationID string `json:"organization_id"`
CIDRBlocks []string `json:"cidr_blocks"`
IPAddresses []string `json:"ip_addresses"`
ProjectRoles []string `json:"project_roles"`
TTL time.Duration `json:"ttl"`
MaxTTL time.Duration `json:"max_ttl"`
}
func (r atlasCredentialEntry) toResponseData() map[string]interface{} {
respData := map[string]interface{}{
"project_id": r.ProjectID,
"database_name": r.DatabaseName,
"roles": r.Roles,
"organization_id": r.OrganizationID,
"cidr_blocks": r.CIDRBlocks,
"ip_addresses": r.IPAddresses,
"project_roles": r.ProjectRoles,
"ttl": r.TTL.Seconds(),
"max_ttl": r.MaxTTL.Seconds(),
}
return respData
}
const pathRolesHelpSyn = `
Manage the roles used to generate MongoDB Atlas Programmatic API Keys.
`
const pathRolesHelpDesc = `
This path lets you manage the roles used to generate MongoDB Atlas Programmatic API Keys.
The "project_id" parameter specifies the Project in which the Programmatic API Key will be
created.
The "organization_id" parameter specifies the Organization in which the key will be created.
If both are specified, the key will be created in the "organization_id" Organization and then
assigned to the Project with the provided "project_id".
The "roles" parameter specifies the MongoDB Atlas Programmatic Key roles that should be assigned
to the Programmatic API Keys created for a given role. At least one role must be provided,
and it must be valid for the key level (project or organization).
"ip_addresses" and "cidr_blocks" are used to add whitelist entries for the API key.
"project_roles" is required when both "organization_id" and "project_id" are supplied.
It is a list of roles that the API Key should be granted; a minimum of one role must be
provided, and any roles provided must be valid for the assigned Project.
To validate the keys, attempt to read an access key after writing the policy.
`
const orgProgrammaticAPIKey = `organization`
const projectProgrammaticAPIKey = `project`
const programmaticAPIKey = `programmatic_api_key`
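To illustrate the role parameters described above, a hedged sketch using the Vault Go API client; the mount path, role name, Organization ID, and lease values are placeholders, not values from this change.

package example

import (
	vault "github.com/hashicorp/vault/api"
)

// writeOrgRole creates a role that issues organization-level keys with a
// whitelist entry and explicit lease bounds.
func writeOrgRole(client *vault.Client) error {
	_, err := client.Logical().Write("mongodbatlas/roles/test-programmatic-key", map[string]interface{}{
		"organization_id": "5b71ff2f96e82120d0aaec14", // placeholder Organization ID
		"roles":           []string{"ORG_MEMBER"},
		"cidr_blocks":     []string{"192.168.1.0/24"},
		"ttl":             "2h",
		"max_ttl":         "8h",
	})
	return err
}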

View File

@@ -0,0 +1,33 @@
package mongodbatlas
import (
"context"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
func (b *Backend) pathRolesList() *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.operationListRoles,
},
HelpSynopsis: pathRolesListHelpSyn,
HelpDescription: pathRolesListHelpDesc,
}
}
func (b *Backend) operationListRoles(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List(ctx, "roles/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
const pathRolesListHelpSyn = `List the existing roles in this backend`
const pathRolesListHelpDesc = `Roles will be listed by the role name.`

View File

@@ -0,0 +1,304 @@
package mongodbatlas
import (
"context"
"errors"
"fmt"
"net/http"
"time"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
"github.com/mitchellh/mapstructure"
"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)
func (b *Backend) programmaticAPIKeys() *framework.Secret {
return &framework.Secret{
Type: programmaticAPIKey,
Fields: map[string]*framework.FieldSchema{
"public_key": {
Type: framework.TypeString,
Description: "Programmatic API Key Public Key",
},
"private_key": {
Type: framework.TypeString,
Description: "Programmatic API Key Private Key",
},
},
Renew: b.programmaticAPIKeysRenew,
Revoke: b.programmaticAPIKeyRevoke,
}
}
func (b *Backend) programmaticAPIKeyCreate(ctx context.Context, s logical.Storage, displayName string, cred *atlasCredentialEntry) (*logical.Response, error) {
apiKeyDescription, err := genUsername(displayName)
if err != nil {
return nil, errwrap.Wrapf("error generating username: {{err}}", err)
}
client, err := b.clientMongo(ctx, s)
if err != nil {
return logical.ErrorResponse(err.Error()), nil
}
walID, err := framework.PutWAL(ctx, s, programmaticAPIKey, &walEntry{
UserName: apiKeyDescription,
})
if err != nil {
return nil, errwrap.Wrapf("error writing WAL entry: {{err}}", err)
}
var key *mongodbatlas.APIKey
switch {
case isOrgKey(cred.OrganizationID, cred.ProjectID):
key, err = createOrgKey(ctx, client, apiKeyDescription, cred)
case isProjectKey(cred.OrganizationID, cred.ProjectID):
key, err = createProjectAPIKey(ctx, client, apiKeyDescription, cred)
case isAssignedToProject(cred.OrganizationID, cred.ProjectID):
key, err = createAndAssignKey(ctx, client, apiKeyDescription, cred)
}
if err != nil {
if walErr := framework.DeleteWAL(ctx, s, walID); walErr != nil {
dbUserErr := errwrap.Wrapf("error creating programmaticAPIKey: {{err}}", err)
return nil, errwrap.Wrap(errwrap.Wrapf("failed to delete WAL entry: {{err}}", walErr), dbUserErr)
}
return logical.ErrorResponse("Error creating programmatic api key: %s", err), err
}
if key == nil {
return nil, errors.New("error creating credential")
}
if err := framework.DeleteWAL(ctx, s, walID); err != nil {
return nil, errwrap.Wrapf("failed to commit WAL entry: {{err}}", err)
}
resp := b.Secret(programmaticAPIKey).Response(map[string]interface{}{
"public_key": key.PublicKey,
"private_key": key.PrivateKey,
"description": apiKeyDescription,
}, map[string]interface{}{
"programmatic_api_key_id": key.ID,
"project_id": cred.ProjectID,
"organization_id": cred.OrganizationID,
})
defaultLease, maxLease := b.getDefaultAndMaxLease()
// If defined, credential TTL overrides default lease configuration
if cred.TTL > 0 {
defaultLease = cred.TTL
}
if cred.MaxTTL > 0 {
maxLease = cred.MaxTTL
}
resp.Secret.TTL = defaultLease
resp.Secret.MaxTTL = maxLease
return resp, nil
}
func createOrgKey(ctx context.Context, client *mongodbatlas.Client, apiKeyDescription string, credentialEntry *atlasCredentialEntry) (*mongodbatlas.APIKey, error) {
key, _, err := client.APIKeys.Create(ctx, credentialEntry.OrganizationID,
&mongodbatlas.APIKeyInput{
Desc: apiKeyDescription,
Roles: credentialEntry.Roles,
})
if err != nil {
return nil, err
}
if err := addWhitelistEntry(ctx, client, credentialEntry.OrganizationID, key.ID, credentialEntry); err != nil {
return nil, err
}
return key, nil
}
func createProjectAPIKey(ctx context.Context, client *mongodbatlas.Client, apiKeyDescription string, credentialEntry *atlasCredentialEntry) (*mongodbatlas.APIKey, error) {
key, _, err := client.ProjectAPIKeys.Create(ctx, credentialEntry.ProjectID,
&mongodbatlas.APIKeyInput{
Desc: apiKeyDescription,
Roles: credentialEntry.Roles,
})
return key, err
}
func createAndAssignKey(ctx context.Context, client *mongodbatlas.Client, apiKeyDescription string, credentialEntry *atlasCredentialEntry) (*mongodbatlas.APIKey, error) {
key, err := createOrgKey(ctx, client, apiKeyDescription, credentialEntry)
if err != nil {
return nil, err
}
if _, err := client.ProjectAPIKeys.Assign(ctx, credentialEntry.ProjectID, key.ID, &mongodbatlas.AssignAPIKey{
Roles: credentialEntry.ProjectRoles,
}); err != nil {
return nil, err
}
return key, nil
}
func addWhitelistEntry(ctx context.Context, client *mongodbatlas.Client, orgID string, keyID string, cred *atlasCredentialEntry) error {
var entries []*mongodbatlas.WhitelistAPIKeysReq
for _, cidrBlock := range cred.CIDRBlocks {
cidr := &mongodbatlas.WhitelistAPIKeysReq{
CidrBlock: cidrBlock,
}
entries = append(entries, cidr)
}
for _, ipAddress := range cred.IPAddresses {
ip := &mongodbatlas.WhitelistAPIKeysReq{
IPAddress: ipAddress,
}
entries = append(entries, ip)
}
if entries != nil {
_, _, err := client.WhitelistAPIKeys.Create(ctx, orgID, keyID, entries)
return err
}
return nil
}
func (b *Backend) programmaticAPIKeyRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
programmaticAPIKeyIDRaw, ok := req.Secret.InternalData["programmatic_api_key_id"]
if !ok {
return nil, fmt.Errorf("secret is missing programmatic api key id internal data")
}
programmaticAPIKeyID, ok := programmaticAPIKeyIDRaw.(string)
if !ok {
return nil, fmt.Errorf("secret is missing programmatic api key id internal data")
}
organizationID := ""
organizationIDRaw, ok := req.Secret.InternalData["organization_id"]
if ok {
organizationID, ok = organizationIDRaw.(string)
if !ok {
return nil, fmt.Errorf("secret is missing organization id internal data")
}
}
projectID := ""
projectIDRaw, ok := req.Secret.InternalData["project_id"]
if ok {
projectID, ok = projectIDRaw.(string)
if !ok {
return nil, fmt.Errorf("secret is missing project_id internal data")
}
}
var data = map[string]interface{}{
"organization_id": organizationID,
"programmatic_api_key_id": programmaticAPIKeyID,
"project_id": projectID,
}
// Use the WAL rollback mechanism to delete this programmatic API key
if err := b.pathProgrammaticAPIKeyRollback(ctx, req, programmaticAPIKey, data); err != nil {
return nil, err
}
return nil, nil
}
func (b *Backend) pathProgrammaticAPIKeyRollback(ctx context.Context, req *logical.Request, _kind string, data interface{}) error {
var entry walEntry
if err := mapstructure.Decode(data, &entry); err != nil {
return err
}
// Get the client
client, err := b.clientMongo(ctx, req.Storage)
if err != nil {
return err
}
switch {
case isOrgKey(entry.OrganizationID, entry.ProjectID):
// check whether the API key still exists
_, res, err := client.APIKeys.Get(ctx, entry.OrganizationID, entry.ProgrammaticAPIKeyID)
// if the key is already gone, move along
if err != nil {
if res != nil && res.StatusCode == http.StatusNotFound {
return nil
}
return err
}
// now, delete the api key
res, err = client.APIKeys.Delete(ctx, entry.OrganizationID, entry.ProgrammaticAPIKeyID)
if err != nil {
if res != nil && res.StatusCode == http.StatusNotFound {
return nil
}
return err
}
case isProjectKey(entry.OrganizationID, entry.ProjectID):
// now, unassign the API key from the project
res, err := client.ProjectAPIKeys.Unassign(ctx, entry.ProjectID, entry.ProgrammaticAPIKeyID)
if err != nil {
if res != nil && res.StatusCode == http.StatusNotFound {
return nil
}
return err
}
case isAssignedToProject(entry.OrganizationID, entry.ProjectID):
// check whether the API key still exists
_, res, err := client.APIKeys.Get(ctx, entry.OrganizationID, entry.ProgrammaticAPIKeyID)
// if the key is already gone, move along
if err != nil {
if res != nil && res.StatusCode == http.StatusNotFound {
return nil
}
return err
}
// now, delete the api key
res, err = client.APIKeys.Delete(ctx, entry.OrganizationID, entry.ProgrammaticAPIKeyID)
if err != nil {
if res != nil && res.StatusCode == http.StatusNotFound {
return nil
}
return err
}
}
return nil
}
func (b *Backend) programmaticAPIKeysRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
// Get the lease (if any)
defaultLease, maxLease := b.getDefaultAndMaxLease()
resp := &logical.Response{Secret: req.Secret}
resp.Secret.TTL = defaultLease
resp.Secret.MaxTTL = maxLease
return resp, nil
}
func (b *Backend) getDefaultAndMaxLease() (time.Duration, time.Duration) {
maxLease := b.system.MaxLeaseTTL()
defaultLease := b.system.DefaultLeaseTTL()
if defaultLease > maxLease {
maxLease = defaultLease
}
return defaultLease, maxLease
}
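The TTL handling in programmaticAPIKeyCreate and getDefaultAndMaxLease above amounts to the following rule. This is only a sketch of the selection logic for clarity, not an exported helper of the plugin.

package example

import "time"

// effectiveLease starts from the system/mount lease settings and lets a
// role's ttl/max_ttl override them when set, mirroring the code above.
func effectiveLease(sysDefault, sysMax, roleTTL, roleMaxTTL time.Duration) (ttl, maxTTL time.Duration) {
	ttl, maxTTL = sysDefault, sysMax
	if ttl > maxTTL {
		maxTTL = ttl
	}
	if roleTTL > 0 {
		ttl = roleTTL
	}
	if roleMaxTTL > 0 {
		maxTTL = roleMaxTTL
	}
	return ttl, maxTTL
}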

View File

@@ -0,0 +1,287 @@
package mongodbatlas
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/vault/sdk/logical"
)
type testEnv struct {
PublicKey string
PrivateKey string
ProjectID string
OrganizationID string
Backend logical.Backend
Context context.Context
Storage logical.Storage
MostRecentSecret *logical.Secret
}
func (e *testEnv) AddConfig(t *testing.T) {
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config",
Storage: e.Storage,
Data: map[string]interface{}{
"public_key": e.PublicKey,
"private_key": e.PrivateKey,
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
if resp != nil {
t.Fatal("expected nil response to represent a 204")
}
}
func (e *testEnv) AddLeaseConfig(t *testing.T) {
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/lease",
Storage: e.Storage,
Data: map[string]interface{}{
"ttl": "80s",
"max_ttl": "160s",
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
if resp != nil {
t.Fatal("expected nil response to represent a 204")
}
}
func (e *testEnv) AddProgrammaticAPIKeyRole(t *testing.T) {
roles := []string{"ORG_MEMBER"}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/test-programmatic-key",
Storage: e.Storage,
Data: map[string]interface{}{
"organization_id": e.OrganizationID,
"roles": roles,
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
}
func (e *testEnv) AddProgrammaticAPIKeyRoleWithProjectIDAndOrgID(t *testing.T) {
roles := []string{"ORG_MEMBER"}
projectRoles := []string{"GROUP_READ_ONLY"}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/test-programmatic-key",
Storage: e.Storage,
Data: map[string]interface{}{
"organization_id": e.OrganizationID,
"project_id": e.ProjectID,
"roles": roles,
"project_roles": projectRoles,
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
}
func (e *testEnv) AddProgrammaticAPIKeyRoleWithTTL(t *testing.T) {
roles := []string{"ORG_MEMBER"}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/test-programmatic-key",
Storage: e.Storage,
Data: map[string]interface{}{
"organization_id": e.OrganizationID,
"roles": roles,
"ttl": "20s",
"max_ttl": "60s",
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
}
func (e *testEnv) AddProgrammaticAPIKeyRoleWithIP(t *testing.T) {
roles := []string{"ORG_MEMBER"}
ips := []string{"192.168.1.1", "192.168.1.2"}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/test-programmatic-key",
Storage: e.Storage,
Data: map[string]interface{}{
"organization_id": e.OrganizationID,
"roles": roles,
"ip_addresses": ips,
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
}
func (e *testEnv) AddProgrammaticAPIKeyRoleProjectWithIP(t *testing.T) {
roles := []string{"ORG_MEMBER"}
ips := []string{"192.168.1.1", "192.168.1.2"}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/test-programmatic-key",
Storage: e.Storage,
Data: map[string]interface{}{
"project_id": e.ProjectID,
"roles": roles,
"ip_addresses": ips,
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
}
func (e *testEnv) AddProgrammaticAPIKeyRoleWithCIDR(t *testing.T) {
roles := []string{"ORG_MEMBER"}
cidrBlocks := []string{"179.154.224.2/32"}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/test-programmatic-key",
Storage: e.Storage,
Data: map[string]interface{}{
"organization_id": e.OrganizationID,
"roles": roles,
"cidr_blocks": cidrBlocks,
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
}
func (e *testEnv) AddProgrammaticAPIKeyRoleWithCIDRAndIP(t *testing.T) {
roles := []string{"ORG_MEMBER"}
cidrBlocks := []string{"179.154.224.2/32"}
ips := []string{"192.168.1.1", "192.168.1.2"}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/test-programmatic-key",
Storage: e.Storage,
Data: map[string]interface{}{
"organization_id": e.OrganizationID,
"roles": roles,
"cidr_blocks": cidrBlocks,
"ip_addresses": ips,
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
}
func (e *testEnv) AddProgrammaticAPIKeyRoleWithProjectID(t *testing.T) {
roles := []string{"ORG_MEMBER"}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/test-programmatic-key",
Storage: e.Storage,
Data: map[string]interface{}{
"roles": roles,
"project_id": e.ProjectID,
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
}
func (e *testEnv) ReadProgrammaticAPIKeyRule(t *testing.T) {
req := &logical.Request{
Operation: logical.ReadOperation,
Path: "creds/test-programmatic-key",
Storage: e.Storage,
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
if resp == nil {
t.Fatal("expected a response")
}
if resp.Data["public_key"] == "" {
t.Fatal("failed to receive access_key")
}
if resp.Data["private_key"] == "" {
t.Fatal("failed to receive secret_key")
}
e.MostRecentSecret = resp.Secret
}
func (e *testEnv) CheckLease(t *testing.T) {
ttl := int(e.MostRecentSecret.TTL.Seconds())
wantedTTL := 20
maxTTL := int(e.MostRecentSecret.MaxTTL.Seconds())
wantedMaxTTL := 60
if ttl != wantedTTL {
t.Fatal(fmt.Sprintf("ttl=%d, wanted=%d", ttl, wantedTTL))
}
if maxTTL != wantedMaxTTL {
t.Fatal(fmt.Sprintf("maxTTL=%d, wanted=%d", ttl, wantedMaxTTL))
}
}
func (e *testEnv) RenewProgrammaticAPIKeys(t *testing.T) {
req := &logical.Request{
Operation: logical.RenewOperation,
Storage: e.Storage,
Secret: e.MostRecentSecret,
Data: map[string]interface{}{
"lease_id": "foo",
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
if resp == nil {
t.Fatal("expected a response")
}
if resp.Secret != e.MostRecentSecret {
t.Fatalf("expected %+v but got %+v", e.MostRecentSecret, resp.Secret)
}
}
func (e *testEnv) RevokeProgrammaticAPIKeys(t *testing.T) {
req := &logical.Request{
Operation: logical.RevokeOperation,
Storage: e.Storage,
Secret: e.MostRecentSecret,
Data: map[string]interface{}{
"lease_id": "foo",
},
}
resp, err := e.Backend.HandleRequest(e.Context, req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
}
if resp != nil {
t.Fatal("expected nil response to represent a 204")
}
}

View File

@@ -0,0 +1,13 @@
package mongodbatlas
func isOrgKey(orgID, projectID string) bool {
return len(orgID) > 0 && len(projectID) == 0
}
func isProjectKey(orgID, projectID string) bool {
return len(orgID) == 0 && len(projectID) > 0
}
func isAssignedToProject(orgID, projectID string) bool {
return len(orgID) > 0 && len(projectID) > 0
}
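As a quick illustration of how these predicates classify a role, a sketch assuming the same package (the "assigned" and "invalid" labels are only for this example).

// keyTypeFor maps the organization_id/project_id combination to the kind of
// Programmatic API Key the backend will create.
func keyTypeFor(orgID, projectID string) string {
	switch {
	case isOrgKey(orgID, projectID):
		return orgProgrammaticAPIKey // organization-level key
	case isProjectKey(orgID, projectID):
		return projectProgrammaticAPIKey // project-level key
	case isAssignedToProject(orgID, projectID):
		return "assigned" // organization key assigned to a project
	default:
		return "invalid" // neither organization_id nor project_id set
	}
}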

93
vendor/github.com/lib/pq/oid/gen.go generated vendored
View File

@@ -1,93 +0,0 @@
// +build ignore
// Generate the table of OID values
// Run with 'go run gen.go'.
package main
import (
"database/sql"
"fmt"
"log"
"os"
"os/exec"
"strings"
_ "github.com/lib/pq"
)
// OID represent a postgres Object Identifier Type.
type OID struct {
ID int
Type string
}
// Name returns an upper case version of the oid type.
func (o OID) Name() string {
return strings.ToUpper(o.Type)
}
func main() {
datname := os.Getenv("PGDATABASE")
sslmode := os.Getenv("PGSSLMODE")
if datname == "" {
os.Setenv("PGDATABASE", "pqgotest")
}
if sslmode == "" {
os.Setenv("PGSSLMODE", "disable")
}
db, err := sql.Open("postgres", "")
if err != nil {
log.Fatal(err)
}
rows, err := db.Query(`
SELECT typname, oid
FROM pg_type WHERE oid < 10000
ORDER BY oid;
`)
if err != nil {
log.Fatal(err)
}
oids := make([]*OID, 0)
for rows.Next() {
var oid OID
if err = rows.Scan(&oid.Type, &oid.ID); err != nil {
log.Fatal(err)
}
oids = append(oids, &oid)
}
if err = rows.Err(); err != nil {
log.Fatal(err)
}
cmd := exec.Command("gofmt")
cmd.Stderr = os.Stderr
w, err := cmd.StdinPipe()
if err != nil {
log.Fatal(err)
}
f, err := os.Create("types.go")
if err != nil {
log.Fatal(err)
}
cmd.Stdout = f
err = cmd.Start()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.")
fmt.Fprintln(w, "\npackage oid")
fmt.Fprintln(w, "const (")
for _, oid := range oids {
fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID)
}
fmt.Fprintln(w, ")")
fmt.Fprintln(w, "var TypeName = map[Oid]string{")
for _, oid := range oids {
fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name())
}
fmt.Fprintln(w, "}")
w.Close()
cmd.Wait()
}

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,180 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
"net/url"
)
const apiKeysPath = "orgs/%s/apiKeys"
// APIKeysService is an interface for interfacing with the APIKeys
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/apiKeys/
type APIKeysService interface {
List(context.Context, string, *ListOptions) ([]APIKey, *Response, error)
Get(context.Context, string, string) (*APIKey, *Response, error)
Create(context.Context, string, *APIKeyInput) (*APIKey, *Response, error)
Update(context.Context, string, string, *APIKeyInput) (*APIKey, *Response, error)
Delete(context.Context, string, string) (*Response, error)
}
// APIKeysServiceOp handles communication with the APIKey related methods
// of the MongoDB Atlas API
type APIKeysServiceOp struct {
client *Client
}
var _ APIKeysService = &APIKeysServiceOp{}
// APIKeyInput represents MongoDB API key input request for Create.
type APIKeyInput struct {
Desc string `json:"desc,omitempty"`
Roles []string `json:"roles,omitempty"`
}
// APIKey represents MongoDB API Key.
type APIKey struct {
ID string `json:"id,omitempty"`
Desc string `json:"desc,omitempty"`
Roles []AtlasRole `json:"roles,omitempty"`
PrivateKey string `json:"privateKey,omitempty"`
PublicKey string `json:"publicKey,omitempty"`
}
// AtlasRole represents a role name of API key
type AtlasRole struct {
GroupID string `json:"groupId,omitempty"`
OrgID string `json:"orgId,omitempty"`
RoleName string `json:"roleName,omitempty"`
}
// apiKeysResponse is the response from the APIKeysService.List.
type apiKeysResponse struct {
Links []*Link `json:"links,omitempty"`
Results []APIKey `json:"results,omitempty"`
TotalCount int `json:"totalCount,omitempty"`
}
//List gets all API Keys in the organization associated to {ORG-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/apiKeys-orgs-get-all/
func (s *APIKeysServiceOp) List(ctx context.Context, orgID string, listOptions *ListOptions) ([]APIKey, *Response, error) {
path := fmt.Sprintf(apiKeysPath, orgID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(apiKeysResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Get gets the APIKey specified to {API-KEY-ID} from the organization associated to {ORG-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/apiKeys-orgs-get-one/
func (s *APIKeysServiceOp) Get(ctx context.Context, orgID string, apiKeyID string) (*APIKey, *Response, error) {
if apiKeyID == "" {
return nil, nil, NewArgError("name", "must be set")
}
basePath := fmt.Sprintf(apiKeysPath, orgID)
escapedEntry := url.PathEscape(apiKeyID)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(APIKey)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create an API Key by the {ORG-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/apiKeys-orgs-create-one/
func (s *APIKeysServiceOp) Create(ctx context.Context, orgID string, createRequest *APIKeyInput) (*APIKey, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(apiKeysPath, orgID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(APIKey)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Update an API Key in the organization associated to {ORG-ID}
//See more: https://docs.atlas.mongodb.com/reference/api/apiKeys-orgs-update-one/
func (s *APIKeysServiceOp) Update(ctx context.Context, orgID string, apiKeyID string, updateRequest *APIKeyInput) (*APIKey, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
basePath := fmt.Sprintf(apiKeysPath, orgID)
path := fmt.Sprintf("%s/%s", basePath, apiKeyID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(APIKey)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Delete the API Key specified to {API-KEY-ID} from the organization associated to {ORG-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/apiKey-delete-one-apiKey/
func (s *APIKeysServiceOp) Delete(ctx context.Context, orgID string, apiKeyID string) (*Response, error) {
if apiKeyID == "" {
return nil, NewArgError("apiKeyID", "must be set")
}
basePath := fmt.Sprintf(apiKeysPath, orgID)
escapedEntry := url.PathEscape(apiKeyID)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
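A hedged sketch of driving this service from application code. It assumes an already-constructed *mongodbatlas.Client (construction and authentication are outside this file) and a placeholder organization ID and description.

package example

import (
	"context"
	"log"

	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

// createAndDeleteOrgKey creates an organization API key and then removes it.
func createAndDeleteOrgKey(ctx context.Context, client *mongodbatlas.Client, orgID string) {
	key, _, err := client.APIKeys.Create(ctx, orgID, &mongodbatlas.APIKeyInput{
		Desc:  "example key", // placeholder description
		Roles: []string{"ORG_MEMBER"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created key %s (public key %s)", key.ID, key.PublicKey)

	if _, err := client.APIKeys.Delete(ctx, orgID, key.ID); err != nil {
		log.Fatal(err)
	}
}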

View File

@@ -0,0 +1,145 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const (
usersBasePath = "groups/%s/users"
)
// AtlasUsersService is an interface for interfacing with the AtlasUsers
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/users/
type AtlasUsersService interface {
List(context.Context, string, *ListOptions) ([]AtlasUser, *Response, error)
Get(context.Context, string) (*AtlasUser, *Response, error)
GetByName(context.Context, string) (*AtlasUser, *Response, error)
Create(context.Context, *AtlasUser) (*AtlasUser, *Response, error)
}
//AtlasUsersServiceOp handles communication with the AtlasUsers related methods of the
//MongoDB Atlas API
type AtlasUsersServiceOp struct {
client *Client
}
var _ AtlasUsersService = &AtlasUsersServiceOp{}
// AtlasUsersResponse represents an array of Atlas users
type AtlasUsersResponse struct {
Links []*Link `json:"links"`
Results []AtlasUser `json:"results"`
TotalCount int `json:"totalCount"`
}
type AtlasUser struct {
EmailAddress string `json:"emailAddress"`
FirstName string `json:"firstName"`
ID string `json:"id,omitempty"`
LastName string `json:"lastName"`
Roles []AtlasRole `json:"roles"`
TeamIds []string `json:"teamIds,omitempty"`
Username string `json:"username"`
MobileNumber string `json:"mobileNumber"`
Password string `json:"password"`
Country string `json:"country"`
}
//List gets all users.
//See more: https://docs.atlas.mongodb.com/reference/api/user-get-all/
func (s *AtlasUsersServiceOp) List(ctx context.Context, orgID string, listOptions *ListOptions) ([]AtlasUser, *Response, error) {
path := fmt.Sprintf(usersBasePath, orgID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(AtlasUsersResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Get gets a single atlas user.
//See more: https://docs.atlas.mongodb.com/reference/api/user-get-by-id/
func (s *AtlasUsersServiceOp) Get(ctx context.Context, userID string) (*AtlasUser, *Response, error) {
if userID == "" {
return nil, nil, NewArgError("userID", "must be set")
}
path := fmt.Sprintf("users/%s", userID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(AtlasUser)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//GetByName gets a single atlas user by name.
//See more: https://docs.atlas.mongodb.com/reference/api/user-get-one-by-name/
func (s *AtlasUsersServiceOp) GetByName(ctx context.Context, username string) (*AtlasUser, *Response, error) {
if username == "" {
return nil, nil, NewArgError("username", "must be set")
}
path := fmt.Sprintf("users/byName/%s", username)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(AtlasUser)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create creates an Atlas User.
//See more: https://docs.atlas.mongodb.com/reference/api/user-create/
func (s *AtlasUsersServiceOp) Create(ctx context.Context, createRequest *AtlasUser) (*AtlasUser, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
req, err := s.client.NewRequest(ctx, http.MethodPost, "users", createRequest)
if err != nil {
return nil, nil, err
}
root := new(AtlasUser)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
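A short sketch against the AtlasUsersService interface above. The service value would normally come from a configured client, and the username is a placeholder.

package example

import (
	"context"
	"fmt"

	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

// lookupAtlasUser fetches an Atlas user by username via the service interface.
func lookupAtlasUser(ctx context.Context, svc mongodbatlas.AtlasUsersService, username string) error {
	user, _, err := svc.GetByName(ctx, username)
	if err != nil {
		return err
	}
	fmt.Printf("user %s <%s>\n", user.ID, user.EmailAddress)
	return nil
}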

View File

@@ -0,0 +1,83 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const (
auditingsPath = "groups/%s/auditLog"
)
// AuditingsService is an interface for interfacing with the Auditing
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/auditing/
type AuditingsService interface {
Get(context.Context, string) (*Auditing, *Response, error)
Configure(context.Context, string, *Auditing) (*Auditing, *Response, error)
}
// AuditingsServiceOp handles communication with the Auditings related methods
// of the MongoDB Atlas API
type AuditingsServiceOp struct {
client *Client
}
var _ AuditingsService = &AuditingsServiceOp{}
// Auditing represents MongoDB Maintenance Windows
type Auditing struct {
AuditAuthorizationSuccess *bool `json:"auditAuthorizationSuccess,omitempty"` // Indicates whether the auditing system captures successful authentication attempts for audit filters using the "atype" : "authCheck" auditing event. For more information, see auditAuthorizationSuccess
AuditFilter string `json:"auditFilter,omitempty"` // JSON-formatted audit filter used by the project
ConfigurationType string `json:"configurationType,omitempty"` // Denotes the configuration method for the audit filter. Possible values are: NONE - auditing not configured for the project.m FILTER_BUILDER - auditing configured via Atlas UI filter builderm FILTER_JSON - auditing configured via Atlas custom filter or API
Enabled *bool `json:"enabled,omitempty"` // Denotes whether or not the project associated with the {GROUP-ID} has database auditing enabled.
}
// Get audit configuration for the project associated with {GROUP-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/auditing-get-auditLog/
func (s *AuditingsServiceOp) Get(ctx context.Context, groupID string) (*Auditing, *Response, error) {
if groupID == "" {
return nil, nil, NewArgError("groupID", "must be set")
}
path := fmt.Sprintf(auditingsPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Auditing)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Configure the audit configuration for the project associated with {GROUP-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/auditing-set-auditLog/
func (s *AuditingsServiceOp) Configure(ctx context.Context, groupID string, configRequest *Auditing) (*Auditing, *Response, error) {
if configRequest == nil {
return nil, nil, NewArgError("configRequest", "cannot be nil")
}
if groupID == "" {
return nil, nil, NewArgError("groupID", "cannot be nil")
}
path := fmt.Sprintf(auditingsPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, configRequest)
if err != nil {
return nil, nil, err
}
root := new(Auditing)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
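A sketch of enabling database auditing for a project through the AuditingsService above. The group ID and audit filter are placeholders, and the *bool fields require explicit pointers.

package example

import (
	"context"

	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

// enableAuditing turns on auditing for a project with a catch-all filter.
func enableAuditing(ctx context.Context, svc mongodbatlas.AuditingsService, groupID string) error {
	enabled := true
	_, _, err := svc.Configure(ctx, groupID, &mongodbatlas.Auditing{
		Enabled:     &enabled,
		AuditFilter: "{}", // placeholder JSON audit filter
	})
	return err
}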

View File

@@ -0,0 +1,176 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const (
cloudProviderSnapshotRestoreJobBasePath = "groups"
)
// CloudProviderSnapshotRestoreJobsService is an interface for interfacing with the CloudProviderSnapshotRestoreJobs
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/cloudProviderSnapshotRestoreJobs/
type CloudProviderSnapshotRestoreJobsService interface {
List(context.Context, *SnapshotReqPathParameters) (*CloudProviderSnapshotRestoreJobs, *Response, error)
Get(context.Context, *SnapshotReqPathParameters) (*CloudProviderSnapshotRestoreJob, *Response, error)
Create(context.Context, *SnapshotReqPathParameters, *CloudProviderSnapshotRestoreJob) (*CloudProviderSnapshotRestoreJob, *Response, error)
Delete(context.Context, *SnapshotReqPathParameters) (*Response, error)
}
//CloudProviderSnapshotRestoreJobsServiceOp handles communication with the CloudProviderSnapshotRestoreJobs related methods of the
//MongoDB Atlas API
type CloudProviderSnapshotRestoreJobsServiceOp struct {
client *Client
}
var _ CloudProviderSnapshotRestoreJobsService = &CloudProviderSnapshotRestoreJobsServiceOp{}
// CloudProviderSnapshotRestoreJob represents the structure of a cloudProviderSnapshotRestoreJob.
type CloudProviderSnapshotRestoreJob struct {
ID string `json:"id,omitempty"` // The unique identifier of the restore job.
SnapshotID string `json:"snapshotId,omitempty"` // Unique identifier of the snapshot to restore.
DeliveryType string `json:"deliveryType,omitempty"` // Type of restore job to create. Possible values are: automated or download
DeliveryURL []string `json:"deliveryUrl,omitempty"` // One or more URLs for the compressed snapshot files for manual download. Only visible if deliveryType is download.
TargetClusterName string `json:"targetClusterName,omitempty"` // Name of the target Atlas cluster to which the restore job restores the snapshot. Only required if deliveryType is automated.
TargetGroupID string `json:"targetGroupId,omitempty"` // Unique ID of the target Atlas project for the specified targetClusterName. Only required if deliveryType is automated.
Cancelled bool `json:"cancelled,omitempty"` // Indicates whether the restore job was canceled.
CreatedAt string `json:"createdAt,omitempty"` // UTC ISO 8601 formatted point in time when Atlas created the restore job.
Expired bool `json:"expired,omitempty"` // Indicates whether the restore job expired.
ExpiresAt string `json:"expiresAt,omitempty"` // UTC ISO 8601 formatted point in time when the restore job expires.
FinishedAt string `json:"finishedAt,omitempty"` // UTC ISO 8601 formatted point in time when the restore job completed.
Links []*Link `json:"links,omitempty"` // One or more links to sub-resources and/or related resources. The relations between URLs are explained in the Web Linking Specification.
Timestamp string `json:"timestamp,omitempty"` // Timestamp in ISO 8601 date and time format in UTC when the snapshot associated to snapshotId was taken.
}
// CloudProviderSnapshotRestoreJobs represents an array of cloudProviderSnapshotRestoreJob
type CloudProviderSnapshotRestoreJobs struct {
Links []*Link `json:"links"`
Results []*CloudProviderSnapshotRestoreJob `json:"results"`
TotalCount int `json:"totalCount"`
}
//List gets all cloud provider snapshot restore jobs for the specified cluster.
//See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-restore-jobs-get-all/
func (s *CloudProviderSnapshotRestoreJobsServiceOp) List(ctx context.Context, requestParameters *SnapshotReqPathParameters) (*CloudProviderSnapshotRestoreJobs, *Response, error) {
if requestParameters.GroupID == "" {
return nil, nil, NewArgError("groupId", "must be set")
}
if requestParameters.ClusterName == "" {
return nil, nil, NewArgError("clusterName", "must be set")
}
path := fmt.Sprintf("%s/%s/clusters/%s/backup/restoreJobs", cloudProviderSnapshotsBasePath, requestParameters.GroupID, requestParameters.ClusterName)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(CloudProviderSnapshotRestoreJobs)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root, resp, nil
}
//Get gets one cloud provider snapshot restore job for the specified cluster.
//See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-restore-jobs-get-one/
func (s *CloudProviderSnapshotRestoreJobsServiceOp) Get(ctx context.Context, requestParameters *SnapshotReqPathParameters) (*CloudProviderSnapshotRestoreJob, *Response, error) {
if requestParameters.GroupID == "" {
return nil, nil, NewArgError("groupId", "must be set")
}
if requestParameters.ClusterName == "" {
return nil, nil, NewArgError("clusterName", "must be set")
}
if requestParameters.JobID == "" {
return nil, nil, NewArgError("jobId", "must be set")
}
path := fmt.Sprintf("%s/%s/clusters/%s/backup/restoreJobs/%s", cloudProviderSnapshotsBasePath, requestParameters.GroupID, requestParameters.ClusterName, requestParameters.JobID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(CloudProviderSnapshotRestoreJob)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create creates a new restore job from a cloud provider snapshot associated to the specified cluster.
//See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-restore-jobs-create-one/
func (s *CloudProviderSnapshotRestoreJobsServiceOp) Create(ctx context.Context, requestParameters *SnapshotReqPathParameters, createRequest *CloudProviderSnapshotRestoreJob) (*CloudProviderSnapshotRestoreJob, *Response, error) {
// For download restore jobs, the target cluster and project fields are not used, so clear them.
if requestParameters.GroupID == "" {
return nil, nil, NewArgError("groupId", "must be set")
}
if requestParameters.ClusterName == "" {
return nil, nil, NewArgError("clusterName", "must be set")
}
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
if createRequest.DeliveryType == "download" {
createRequest.TargetClusterName = ""
createRequest.TargetGroupID = ""
}
path := fmt.Sprintf("%s/%s/clusters/%s/backup/restoreJobs", cloudProviderSnapshotRestoreJobBasePath, requestParameters.GroupID, requestParameters.ClusterName)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(CloudProviderSnapshotRestoreJob)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root, resp, err
}
//Delete cancels the cloud provider snapshot manual download restore job associated to {JOB-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-restore-jobs-delete-one/
func (s *CloudProviderSnapshotRestoreJobsServiceOp) Delete(ctx context.Context, requestParameters *SnapshotReqPathParameters) (*Response, error) {
if requestParameters.GroupID == "" {
return nil, NewArgError("groupId", "must be set")
}
if requestParameters.ClusterName == "" {
return nil, NewArgError("clusterName", "must be set")
}
if requestParameters.JobID == "" {
return nil, NewArgError("jobId", "must be set")
}
path := fmt.Sprintf("%s/%s/clusters/%s/backup/restoreJobs/%s", cloudProviderSnapshotsBasePath, requestParameters.GroupID, requestParameters.ClusterName, requestParameters.JobID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
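// exampleCreateAutomatedRestoreJob is an illustrative sketch and not part of
// the upstream client: it restores a cloud provider snapshot into a target
// cluster using the service defined above. Every ID and cluster name below is
// hypothetical.
func exampleCreateAutomatedRestoreJob(ctx context.Context, c *Client) (*CloudProviderSnapshotRestoreJob, error) {
	svc := &CloudProviderSnapshotRestoreJobsServiceOp{client: c}
	params := &SnapshotReqPathParameters{
		GroupID:     "5d1234567890abcdef012345",
		ClusterName: "Cluster0",
	}
	job := &CloudProviderSnapshotRestoreJob{
		SnapshotID:        "5e9876543210fedcba987654",
		DeliveryType:      "automated",
		TargetClusterName: "Cluster1",
		TargetGroupID:     "5d1234567890abcdef012345",
	}
	created, _, err := svc.Create(ctx, params, job)
	return created, err
}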

View File

@@ -0,0 +1,173 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const (
cloudProviderSnapshotsBasePath = "groups"
)
// CloudProviderSnapshotsService is an interface for interfacing with the Cloud Provider Snapshots
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot/
type CloudProviderSnapshotsService interface {
GetAllCloudProviderSnapshots(context.Context, *SnapshotReqPathParameters) (*CloudProviderSnapshots, *Response, error)
GetOneCloudProviderSnapshot(context.Context, *SnapshotReqPathParameters) (*CloudProviderSnapshot, *Response, error)
Create(context.Context, *SnapshotReqPathParameters, *CloudProviderSnapshot) (*CloudProviderSnapshot, *Response, error)
Delete(context.Context, *SnapshotReqPathParameters) (*Response, error)
}
//CloudProviderSnapshotsServiceOp handles communication with the Cloud Provider Snapshots related methods of the
//MongoDB Atlas API
type CloudProviderSnapshotsServiceOp struct {
client *Client
}
var _ CloudProviderSnapshotsService = &CloudProviderSnapshotsServiceOp{}
// CloudProviderSnapshot represents a cloud provider snapshot.
type CloudProviderSnapshot struct {
ID string `json:"id,omitempty"` // Unique identifier of the snapshot.
RetentionInDays int `json:"retentionInDays,omitempty"` // The number of days that Atlas should retain the on-demand snapshot. Must be at least 1 .
CreatedAt string `json:"createdAt,omitempty"` // UTC ISO 8601 formatted point in time when Atlas took the snapshot.
ExpiresAt string `json:"expiresAt,omitempty"` // UTC ISO 8601 formatted point in time when Atlas will delete the snapshot.
Description string `json:"description,omitempty"` // Description of the on-demand snapshot.
Links []*Link `json:"links,omitempty"` // One or more links to sub-resources and/or related resources.
MasterKeyUUID string `json:"masterKeyUUID,omitempty"` // Unique ID of the AWS KMS Customer Master Key used to encrypt the snapshot. Only visible for clusters using Encryption at Rest via Customer KMS.
MongodVersion string `json:"mongodVersion,omitempty"` // Version of the MongoDB server.
SnapshotType string `json:"snapshotType,omitempty"` // Specifies the type of snapshot. Valid values are onDemand and scheduled.
Status string `json:"status,omitempty"` // Current status of the snapshot. One of the following values: queued, inProgress, completed, failed
StorageSizeBytes int `json:"storageSizeBytes,omitempty"` // Specifies the size of the snapshot in bytes.
Type string `json:"type,omitempty"` // Specifies the type of cluster: replicaSet or shardedCluster.
}
// CloudProviderSnapshots represents all cloud provider snapshots.
type CloudProviderSnapshots struct {
Results []*CloudProviderSnapshot `json:"results,omitempty"` // Includes one CloudProviderSnapshot object for each item detailed in the results array section.
Links []*Link `json:"links,omitempty"` // One or more links to sub-resources and/or related resources.
TotalCount int `json:"totalCount,omitempty"` // Count of the total number of items in the result set. It may be greater than the number of objects in the results array if the entire result set is paginated.
}
// SnapshotReqPathParameters represents all the possible path parameters used to make a snapshot request
type SnapshotReqPathParameters struct {
GroupID string `json:"groupId,omitempty"` // The unique identifier of the project for the Atlas cluster.
SnapshotID string `json:"snapshotId,omitempty"` // The unique identifier of the snapshot you want to retrieve.
ClusterName string `json:"clusterName,omitempty"` // The name of the Atlas cluster that contains the snapshots you want to retrieve.
JobID string `json:"jobId,omitempty"` //The unique identifier of the restore job to retrieve.
}
//GetAllCloudProviderSnapshots gets all cloud provider snapshots for the specified cluster.
//See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-get-all/
func (s *CloudProviderSnapshotsServiceOp) GetAllCloudProviderSnapshots(ctx context.Context, requestParameters *SnapshotReqPathParameters) (*CloudProviderSnapshots, *Response, error) {
if requestParameters.GroupID == "" {
return nil, nil, NewArgError("groupId", "must be set")
}
if requestParameters.ClusterName == "" {
return nil, nil, NewArgError("clusterName", "must be set")
}
path := fmt.Sprintf("%s/%s/clusters/%s/backup/snapshots", cloudProviderSnapshotsBasePath, requestParameters.GroupID, requestParameters.ClusterName)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(CloudProviderSnapshots)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root, resp, nil
}
//GetOneCloudProviderSnapshot gets the snapshot associated to {SNAPSHOT-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-get-one/
func (s *CloudProviderSnapshotsServiceOp) GetOneCloudProviderSnapshot(ctx context.Context, requestParameters *SnapshotReqPathParameters) (*CloudProviderSnapshot, *Response, error) {
if requestParameters.GroupID == "" {
return nil, nil, NewArgError("groupId", "must be set")
}
if requestParameters.ClusterName == "" {
return nil, nil, NewArgError("clusterName", "must be set")
}
if requestParameters.SnapshotID == "" {
return nil, nil, NewArgError("snapshotId", "must be set")
}
path := fmt.Sprintf("%s/%s/clusters/%s/backup/snapshots/%s", cloudProviderSnapshotsBasePath, requestParameters.GroupID, requestParameters.ClusterName, requestParameters.SnapshotID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(CloudProviderSnapshot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create takes one on-demand snapshot. Atlas takes on-demand snapshots immediately, unlike scheduled snapshots which occur at regular intervals.
//See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-take-one-ondemand/
func (s *CloudProviderSnapshotsServiceOp) Create(ctx context.Context, requestParameters *SnapshotReqPathParameters, createRequest *CloudProviderSnapshot) (*CloudProviderSnapshot, *Response, error) {
if requestParameters.GroupID == "" {
return nil, nil, NewArgError("groupId", "must be set")
}
if requestParameters.ClusterName == "" {
return nil, nil, NewArgError("clusterName", "must be set")
}
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf("%s/%s/clusters/%s/backup/snapshots", cloudProviderSnapshotsBasePath, requestParameters.GroupID, requestParameters.ClusterName)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(CloudProviderSnapshot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Delete deletes the snapshot associated to {SNAPSHOT-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-delete-one/
func (s *CloudProviderSnapshotsServiceOp) Delete(ctx context.Context, requestParameters *SnapshotReqPathParameters) (*Response, error) {
if requestParameters.GroupID == "" {
return nil, NewArgError("groupId", "must be set")
}
if requestParameters.ClusterName == "" {
return nil, NewArgError("clusterName", "must be set")
}
if requestParameters.SnapshotID == "" {
return nil, NewArgError("snapshotId", "must be set")
}
path := fmt.Sprintf("%s/%s/clusters/%s/backup/snapshots/%s", cloudProviderSnapshotsBasePath, requestParameters.GroupID, requestParameters.ClusterName, requestParameters.SnapshotID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
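// exampleTakeOnDemandSnapshot is an illustrative sketch and not part of the
// upstream client: it takes an on-demand snapshot of a cluster using the
// service defined above. The project ID and cluster name are hypothetical.
func exampleTakeOnDemandSnapshot(ctx context.Context, c *Client) (*CloudProviderSnapshot, error) {
	svc := &CloudProviderSnapshotsServiceOp{client: c}
	params := &SnapshotReqPathParameters{
		GroupID:     "5d1234567890abcdef012345",
		ClusterName: "Cluster0",
	}
	snapshot := &CloudProviderSnapshot{
		RetentionInDays: 3,
		Description:     "pre-migration on-demand snapshot",
	}
	created, _, err := svc.Create(ctx, params, snapshot)
	return created, err
}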

View File

@@ -0,0 +1,348 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
"net/url"
)
const clustersPath = "groups/%s/clusters"
//ClustersService is an interface for interfacing with the Clusters
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/clusters/
type ClustersService interface {
List(context.Context, string, *ListOptions) ([]Cluster, *Response, error)
Get(context.Context, string, string) (*Cluster, *Response, error)
Create(context.Context, string, *Cluster) (*Cluster, *Response, error)
Update(context.Context, string, string, *Cluster) (*Cluster, *Response, error)
Delete(context.Context, string, string) (*Response, error)
UpdateProcessArgs(context.Context, string, string, *ProcessArgs) (*ProcessArgs, *Response, error)
GetProcessArgs(context.Context, string, string) (*ProcessArgs, *Response, error)
}
//ClustersServiceOp handles communication with the Cluster related methods
// of the MongoDB Atlas API
type ClustersServiceOp struct {
client *Client
}
var _ ClustersService = &ClustersServiceOp{}
// AutoScaling configures your cluster to automatically scale its storage
type AutoScaling struct {
DiskGBEnabled *bool `json:"diskGBEnabled,omitempty"`
}
// BiConnector specifies BI Connector for Atlas configuration on this cluster
type BiConnector struct {
Enabled *bool `json:"enabled,omitempty"`
ReadPreference string `json:"readPreference,omitempty"`
}
// ProviderSettings configuration for the provisioned servers on which MongoDB runs. The available options are specific to the cloud service provider.
type ProviderSettings struct {
BackingProviderName string `json:"backingProviderName,omitempty"`
DiskIOPS *int64 `json:"diskIOPS,omitempty"`
DiskTypeName string `json:"diskTypeName,omitempty"`
EncryptEBSVolume *bool `json:"encryptEBSVolume,omitempty"`
InstanceSizeName string `json:"instanceSizeName,omitempty"`
ProviderName string `json:"providerName,omitempty"`
RegionName string `json:"regionName,omitempty"`
VolumeType string `json:"volumeType,omitempty"`
}
// RegionsConfig describes the regions priority in elections and the number and type of MongoDB nodes Atlas deploys to the region.
type RegionsConfig struct {
AnalyticsNodes *int64 `json:"analyticsNodes,omitempty"`
ElectableNodes *int64 `json:"electableNodes,omitempty"`
Priority *int64 `json:"priority,omitempty"`
ReadOnlyNodes *int64 `json:"readOnlyNodes,omitempty"`
}
// ReplicationSpec represents a configuration for cluster regions
type ReplicationSpec struct {
ID string `json:"id,omitempty"`
NumShards *int64 `json:"numShards,omitempty"`
ZoneName string `json:"zoneName,omitempty"`
RegionsConfig map[string]RegionsConfig `json:"regionsConfig,omitempty"`
}
// Cluster represents a MongoDB cluster.
type Cluster struct {
AutoScaling AutoScaling `json:"autoScaling,omitempty"`
BackupEnabled *bool `json:"backupEnabled,omitempty"`
BiConnector BiConnector `json:"biConnector,omitempty"`
ClusterType string `json:"clusterType,omitempty"`
DiskSizeGB *float64 `json:"diskSizeGB,omitempty"`
EncryptionAtRestProvider string `json:"encryptionAtRestProvider,omitempty"`
ID string `json:"id,omitempty"`
GroupID string `json:"groupId,omitempty"`
MongoDBVersion string `json:"mongoDBVersion,omitempty"`
MongoDBMajorVersion string `json:"mongoDBMajorVersion,omitempty"`
MongoURI string `json:"mongoURI,omitempty"`
MongoURIUpdated string `json:"mongoURIUpdated,omitempty"`
MongoURIWithOptions string `json:"mongoURIWithOptions,omitempty"`
Name string `json:"name,omitempty"`
NumShards *int64 `json:"numShards,omitempty"`
Paused *bool `json:"paused,omitempty"`
ProviderBackupEnabled *bool `json:"providerBackupEnabled,omitempty"`
ProviderSettings *ProviderSettings `json:"providerSettings,omitempty"`
ReplicationFactor *int64 `json:"replicationFactor,omitempty"`
ReplicationSpec map[string]RegionsConfig `json:"replicationSpec,omitempty"`
ReplicationSpecs []ReplicationSpec `json:"replicationSpecs,omitempty"`
SrvAddress string `json:"srvAddress,omitempty"`
StateName string `json:"stateName,omitempty"`
}
// ProcessArgs represents the advanced configuration options for the cluster
type ProcessArgs struct {
FailIndexKeyTooLong *bool `json:"failIndexKeyTooLong,omitempty"`
JavascriptEnabled *bool `json:"javascriptEnabled,omitempty"`
MinimumEnabledTLSProtocol string `json:"minimumEnabledTlsProtocol,omitempty"`
NoTableScan *bool `json:"noTableScan,omitempty"`
OplogSizeMB *int64 `json:"oplogSizeMB,omitempty"`
SampleSizeBIConnector *int64 `json:"sampleSizeBIConnector,omitempty"`
SampleRefreshIntervalBIConnector *int64 `json:"sampleRefreshIntervalBIConnector,omitempty"`
}
// clustersResponse is the response from the ClustersService.List.
type clustersResponse struct {
Links []*Link `json:"links,omitempty"`
Results []Cluster `json:"results,omitempty"`
TotalCount int `json:"totalCount,omitempty"`
}
// DefaultDiskSizeGB maps each provider and instance tier to its default disk size in GB.
// It can be used like: DefaultDiskSizeGB["AWS"]["M10"]
var DefaultDiskSizeGB map[string]map[string]float64 = map[string]map[string]float64{
"TENANT": {
"M2": 2,
"M5": 5,
},
"AWS": {
"M10": 10,
"M20": 20,
"M30": 40,
"M40": 80,
"R40": 80,
"M40_NVME": 380,
"M50": 160,
"R50": 160,
"M50_NVME": 760,
"M60": 320,
"R60": 320,
"M60_NVME": 1600,
"M80": 750,
"R80": 750,
"M80_NVME": 1600,
"M140": 1000,
"M200": 1500,
"R200": 1500,
"M200_NVME": 3100,
"M300": 2000,
"R300": 2000,
"R400": 3000,
"M400_NVME": 4000,
},
"GCP": {
"M10": 10,
"M20": 20,
"M30": 40,
"M40": 80,
"M50": 160,
"M60": 320,
"M80": 750,
"M200": 1500,
"M300": 2200,
},
"AZURE": {
"M10": 32,
"M20": 32,
"M30": 32,
"M40": 128,
"M50": 128,
"M60": 128,
"M80": 256,
"M200": 256,
},
}
//List all clusters in the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/clusters-get-all/
func (s *ClustersServiceOp) List(ctx context.Context, groupID string, listOptions *ListOptions) ([]Cluster, *Response, error) {
path := fmt.Sprintf(clustersPath, groupID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(clustersResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Get gets the cluster specified to {CLUSTER-NAME} from the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/clusters-get-one/
func (s *ClustersServiceOp) Get(ctx context.Context, groupID string, clusterName string) (*Cluster, *Response, error) {
if err := checkClusterNameParam(clusterName); err != nil {
return nil, nil, err
}
basePath := fmt.Sprintf(clustersPath, groupID)
escapedEntry := url.PathEscape(clusterName)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Cluster)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Create adds a cluster to the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/clusters-create-one/
func (s *ClustersServiceOp) Create(ctx context.Context, groupID string, createRequest *Cluster) (*Cluster, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(clustersPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(Cluster)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Update a cluster in the project associated to {GROUP-ID}
//See more: https://docs.atlas.mongodb.com/reference/api/clusters-modify-one/
func (s *ClustersServiceOp) Update(ctx context.Context, groupID string, clusterName string, updateRequest *Cluster) (*Cluster, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
basePath := fmt.Sprintf(clustersPath, groupID)
path := fmt.Sprintf("%s/%s", basePath, clusterName)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(Cluster)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Delete the cluster specified to {CLUSTER-NAME} from the project associated to {GROUP-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/clusters-delete-one/
func (s *ClustersServiceOp) Delete(ctx context.Context, groupID string, clusterName string) (*Response, error) {
if clusterName == "" {
return nil, NewArgError("clusterName", "must be set")
}
basePath := fmt.Sprintf(clustersPath, groupID)
escapedEntry := url.PathEscape(clusterName)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
//UpdateProcessArgs modifies the advanced configuration options for one cluster
//See more: https://docs.atlas.mongodb.com/reference/api/clusters-modify-advanced-configuration-options/
func (s *ClustersServiceOp) UpdateProcessArgs(ctx context.Context, groupID string, clusterName string, updateRequest *ProcessArgs) (*ProcessArgs, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
basePath := fmt.Sprintf(clustersPath, groupID)
path := fmt.Sprintf("%s/%s/processArgs", basePath, clusterName)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(ProcessArgs)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//GetProcessArgs gets the advanced configuration options for one cluster
//See more: https://docs.atlas.mongodb.com/reference/api/clusters-get-advanced-configuration-options/#get-advanced-configuration-options-for-one-cluster
func (s *ClustersServiceOp) GetProcessArgs(ctx context.Context, groupID string, clusterName string) (*ProcessArgs, *Response, error) {
if err := checkClusterNameParam(clusterName); err != nil {
return nil, nil, err
}
basePath := fmt.Sprintf(clustersPath, groupID)
escapedEntry := url.PathEscape(clusterName)
path := fmt.Sprintf("%s/%s/processArgs", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(ProcessArgs)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
func checkClusterNameParam(clusterName string) error {
if clusterName == "" {
return NewArgError("name", "must be set")
}
return nil
}
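// exampleCreateCluster is an illustrative sketch and not part of the upstream
// client: it creates a small AWS-backed replica set using the Clusters service
// above and the DefaultDiskSizeGB lookup table. The project ID, cluster name
// and field values are hypothetical.
func exampleCreateCluster(ctx context.Context, c *Client) (*Cluster, error) {
	svc := &ClustersServiceOp{client: c}
	diskSizeGB := DefaultDiskSizeGB["AWS"]["M10"]
	cluster := &Cluster{
		Name:        "Cluster0",
		ClusterType: "REPLICASET",
		DiskSizeGB:  &diskSizeGB,
		ProviderSettings: &ProviderSettings{
			ProviderName:     "AWS",
			InstanceSizeName: "M10",
			RegionName:       "US_EAST_1",
		},
	}
	created, _, err := svc.Create(ctx, "5d1234567890abcdef012345", cluster)
	return created, err
}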

View File

@@ -0,0 +1,181 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
"net/url"
)
const containersPath = "groups/%s/containers"
//ContainersService is an interface for interfacing with the Network Peering Containers
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/vpc/
type ContainersService interface {
List(context.Context, string, *ContainersListOptions) ([]Container, *Response, error)
Get(context.Context, string, string) (*Container, *Response, error)
Create(context.Context, string, *Container) (*Container, *Response, error)
Update(context.Context, string, string, *Container) (*Container, *Response, error)
Delete(context.Context, string, string) (*Response, error)
}
//ContainersServiceOp handles communication with the Network Peering Container related methods
// of the MongoDB Atlas API
type ContainersServiceOp struct {
client *Client
}
var _ ContainersService = &ContainersServiceOp{}
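// ContainersListOptions specifies the optional query parameters to the ContainersService.List method.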
type ContainersListOptions struct {
ProviderName string `url:"providerName,omitempty"`
ListOptions
}
// Container represents a MongoDB network peering container.
type Container struct {
AtlasCIDRBlock string `json:"atlasCidrBlock,omitempty"`
AzureSubscriptionID string `json:"azureSubscriptionId,omitempty"`
GCPProjectID string `json:"gcpProjectId,omitempty"`
ID string `json:"id,omitempty"`
NetworkName string `json:"networkName,omitempty"`
ProviderName string `json:"providerName,omitempty"`
Provisioned *bool `json:"provisioned,omitempty"`
Region string `json:"region,omitempty"`
RegionName string `json:"regionName,omitempty"`
VNetName string `json:"vnetName,omitempty"`
VPCID string `json:"vpcId,omitempty"`
}
// containersResponse is the response from the ContainersService.List.
type containersResponse struct {
Links []*Link `json:"links,omitempty"`
Results []Container `json:"results,omitempty"`
TotalCount int `json:"totalCount,omitempty"`
}
//List all containers in the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/vpc-get-containers-list/
func (s *ContainersServiceOp) List(ctx context.Context, groupID string, listOptions *ContainersListOptions) ([]Container, *Response, error) {
path := fmt.Sprintf(containersPath, groupID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(containersResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Get gets the network peering container specified to {CONTAINER-ID} from the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/vpc-get-container/
func (s *ContainersServiceOp) Get(ctx context.Context, groupID string, containerID string) (*Container, *Response, error) {
if containerID == "" {
return nil, nil, NewArgError("perrID", "must be set")
}
basePath := fmt.Sprintf(containersPath, groupID)
escapedEntry := url.PathEscape(containerID)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Container)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create adds a network peering container to the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/vpc-create-container/
func (s *ContainersServiceOp) Create(ctx context.Context, groupID string, createRequest *Container) (*Container, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(containersPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(Container)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Update a network peering container in the project associated to {GROUP-ID}
//See more: https://docs.atlas.mongodb.com/reference/api/vpc-update-container/
func (s *ContainersServiceOp) Update(ctx context.Context, groupID string, containerID string, updateRequest *Container) (*Container, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
basePath := fmt.Sprintf(containersPath, groupID)
path := fmt.Sprintf("%s/%s", basePath, containerID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(Container)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Delete the network peering container specified to {CONTAINER-ID} from the project associated to {GROUP-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/vpc-delete-one-container/
func (s *ContainersServiceOp) Delete(ctx context.Context, groupID string, containerID string) (*Response, error) {
if containerID == "" {
return nil, NewArgError("containerID", "must be set")
}
basePath := fmt.Sprintf(containersPath, groupID)
escapedEntry := url.PathEscape(containerID)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
// Remove the Content-Type header from the empty-body DELETE request to avoid API issues
req.Header.Del("Content-Type")
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
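// exampleCreateContainer is an illustrative sketch and not part of the
// upstream client: it provisions an AWS network peering container using the
// Containers service above. The project ID, CIDR block and region are
// hypothetical.
func exampleCreateContainer(ctx context.Context, c *Client) (*Container, error) {
	svc := &ContainersServiceOp{client: c}
	container := &Container{
		AtlasCIDRBlock: "192.168.248.0/21",
		ProviderName:   "AWS",
		RegionName:     "US_EAST_1",
	}
	created, _, err := svc.Create(ctx, "5d1234567890abcdef012345", container)
	return created, err
}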

View File

@@ -0,0 +1,169 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const dbCustomDBRolesBasePath = "groups/%s/customDBRoles/roles"
// CustomDBRolesService is an interface for working with the Custom MongoDB Roles
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/custom-roles/
type CustomDBRolesService interface {
List(context.Context, string, *ListOptions) (*[]CustomDBRole, *Response, error)
Get(context.Context, string, string) (*CustomDBRole, *Response, error)
Create(context.Context, string, *CustomDBRole) (*CustomDBRole, *Response, error)
Update(context.Context, string, string, *CustomDBRole) (*CustomDBRole, *Response, error)
Delete(context.Context, string, string) (*Response, error)
}
//CustomDBRolesServiceOp handles communication with the CustomDBRoles related methods of the
//MongoDB Atlas API
type CustomDBRolesServiceOp struct {
client *Client
}
var _ CustomDBRolesService = &CustomDBRolesServiceOp{}
// A Resource describes a specific resource the Role will allow operating on.
type Resource struct {
Collection string `json:"collection,omitempty"`
Db string `json:"db,omitempty"`
Cluster bool `json:"cluster,omitempty"`
}
// An Action describes the operation the role will include, for a specific set of Resources.
type Action struct {
Action string `json:"action,omitempty"`
Resources []Resource `json:"resources,omitempty"`
}
// An InheritedRole describes the role that this Role inherits from.
type InheritedRole struct {
Db string `json:"db,omitempty"`
Role string `json:"role,omitempty"`
}
// CustomDBRole represents a Custom MongoDB Role in your cluster.
type CustomDBRole struct {
Actions []Action `json:"actions,omitempty"`
InheritedRoles []InheritedRole `json:"inheritedRoles,omitempty"`
RoleName string `json:"roleName,omitempty"`
}
//List gets all custom db roles in the project.
//See more: https://docs.atlas.mongodb.com/reference/api/custom-roles-get-all-roles/
func (s *CustomDBRolesServiceOp) List(ctx context.Context, groupID string, listOptions *ListOptions) (*[]CustomDBRole, *Response, error) {
path := fmt.Sprintf(dbCustomDBRolesBasePath, groupID)
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new([]CustomDBRole)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, nil
}
// Get gets a single Custom MongoDB Role in the project.
// See more: https://docs.atlas.mongodb.com/reference/api/custom-roles-get-single-role/
func (s *CustomDBRolesServiceOp) Get(ctx context.Context, groupID string, roleName string) (*CustomDBRole, *Response, error) {
if roleName == "" {
return nil, nil, NewArgError("roleName", "must be set")
}
basePath := fmt.Sprintf(dbCustomDBRolesBasePath, groupID)
path := fmt.Sprintf("%s/%s", basePath, roleName)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(CustomDBRole)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Create creates a new Custom MongoDB Role in the project.
// See more: https://docs.atlas.mongodb.com/reference/api/custom-roles-create-a-role/
func (s *CustomDBRolesServiceOp) Create(ctx context.Context, groupID string, createRequest *CustomDBRole) (*CustomDBRole, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(dbCustomDBRolesBasePath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(CustomDBRole)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Update updates a single Custom MongoDB Role.
// See more: https://docs.atlas.mongodb.com/reference/api/custom-roles-update-a-role/
func (s *CustomDBRolesServiceOp) Update(ctx context.Context, groupID string, roleName string, updateRequest *CustomDBRole) (*CustomDBRole, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
basePath := fmt.Sprintf(dbCustomDBRolesBasePath, groupID)
path := fmt.Sprintf("%s/%s", basePath, roleName)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(CustomDBRole)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Delete deletes a single Custom MongoDB Role.
// See more: https://docs.atlas.mongodb.com/reference/api/custom-roles-delete-a-role/
func (s *CustomDBRolesServiceOp) Delete(ctx context.Context, groupID string, roleName string) (*Response, error) {
if roleName == "" {
return nil, NewArgError("roleName", "must be set")
}
basePath := fmt.Sprintf(dbCustomDBRolesBasePath, groupID)
path := fmt.Sprintf("%s/%s", basePath, roleName)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
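// exampleCreateCustomDBRole is an illustrative sketch and not part of the
// upstream client: it creates a custom role that can only run find operations
// on a single collection. The project ID, role name, database and collection
// names, and the FIND action value are hypothetical.
func exampleCreateCustomDBRole(ctx context.Context, c *Client) (*CustomDBRole, error) {
	svc := &CustomDBRolesServiceOp{client: c}
	role := &CustomDBRole{
		RoleName: "findOrdersOnly",
		Actions: []Action{{
			Action:    "FIND",
			Resources: []Resource{{Db: "sales", Collection: "orders"}},
		}},
	}
	created, _, err := svc.Create(ctx, "5d1234567890abcdef012345", role)
	return created, err
}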

View File

@@ -0,0 +1,174 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const dbUsersBasePath = "groups/%s/databaseUsers"
//DatabaseUsersService is an interface for interfacing with the Database Users
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/database-users/index.html
type DatabaseUsersService interface {
List(context.Context, string, *ListOptions) ([]DatabaseUser, *Response, error)
Get(context.Context, string, string) (*DatabaseUser, *Response, error)
Create(context.Context, string, *DatabaseUser) (*DatabaseUser, *Response, error)
Update(context.Context, string, string, *DatabaseUser) (*DatabaseUser, *Response, error)
Delete(context.Context, string, string) (*Response, error)
}
//DatabaseUsersServiceOp handles communication with the DatabaseUsers related methods of the
//MongoDB Atlas API
type DatabaseUsersServiceOp struct {
client *Client
}
var _ DatabaseUsersService = &DatabaseUsersServiceOp{}
// Role allows the user to perform particular actions on the specified database.
// A role on the admin database can include privileges that apply to the other databases as well.
type Role struct {
RoleName string `json:"roleName,omitempty"`
DatabaseName string `json:"databaseName,omitempty"`
CollectionName string `json:"collectionName,omitempty"`
}
// DatabaseUser represents MongoDB users in your cluster.
type DatabaseUser struct {
Roles []Role `json:"roles,omitempty"`
GroupID string `json:"groupId,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
DatabaseName string `json:"databaseName,omitempty"`
LDAPAuthType string `json:"ldapAuthType,omitempty"`
DeleteAfterDate string `json:"deleteAfterDate,omitempty"`
}
// databaseUsers is the response from the DatabaseUsersService.List.
type databaseUsers struct {
Links []*Link `json:"links"`
Results []DatabaseUser `json:"results"`
TotalCount int `json:"totalCount"`
}
//List gets all users in the project.
//See more: https://docs.atlas.mongodb.com/reference/api/database-users-get-all-users/
func (s *DatabaseUsersServiceOp) List(ctx context.Context, groupID string, listOptions *ListOptions) ([]DatabaseUser, *Response, error) {
path := fmt.Sprintf(dbUsersBasePath, groupID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseUsers)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Get gets a single user in the project.
//See more: https://docs.atlas.mongodb.com/reference/api/database-users-get-single-user/
func (s *DatabaseUsersServiceOp) Get(ctx context.Context, groupID string, username string) (*DatabaseUser, *Response, error) {
if username == "" {
return nil, nil, NewArgError("username", "must be set")
}
basePath := fmt.Sprintf(dbUsersBasePath, groupID)
path := fmt.Sprintf("%s/admin/%s", basePath, username)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(DatabaseUser)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create creates a user for the project.
//See more: https://docs.atlas.mongodb.com/reference/api/database-users-create-a-user/
func (s *DatabaseUsersServiceOp) Create(ctx context.Context, groupID string, createRequest *DatabaseUser) (*DatabaseUser, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(dbUsersBasePath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(DatabaseUser)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Update updates a user for the project.
//See more: https://docs.atlas.mongodb.com/reference/api/database-users-update-a-user/
func (s *DatabaseUsersServiceOp) Update(ctx context.Context, groupID string, username string, updateRequest *DatabaseUser) (*DatabaseUser, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
basePath := fmt.Sprintf(dbUsersBasePath, groupID)
path := fmt.Sprintf("%s/admin/%s", basePath, username)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(DatabaseUser)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Delete deletes a user for the project.
// See more: https://docs.atlas.mongodb.com/reference/api/database-users-delete-a-user/
func (s *DatabaseUsersServiceOp) Delete(ctx context.Context, groupID string, username string) (*Response, error) {
if username == "" {
return nil, NewArgError("username", "must be set")
}
basePath := fmt.Sprintf(dbUsersBasePath, groupID)
path := fmt.Sprintf("%s/admin/%s", basePath, username)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
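// exampleCreateDatabaseUser is an illustrative sketch and not part of the
// upstream client: it creates a database user with readWrite access to one
// database using the DatabaseUsers service above. The project ID, credentials
// and database names are hypothetical placeholders.
func exampleCreateDatabaseUser(ctx context.Context, c *Client) (*DatabaseUser, error) {
	svc := &DatabaseUsersServiceOp{client: c}
	user := &DatabaseUser{
		Username:     "app-user",
		Password:     "change-me-please",
		DatabaseName: "admin",
		Roles: []Role{
			{RoleName: "readWrite", DatabaseName: "sales"},
		},
	}
	created, _, err := svc.Create(ctx, "5d1234567890abcdef012345", user)
	return created, err
}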

View File

@@ -0,0 +1,180 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
"github.com/mwielbut/pointy"
)
const (
// CaCentral1 represents the CA_CENTRAL_1 America region for AWS Configuration
CaCentral1 = "CA_CENTRAL_1"
// UsEast1 represents the US_EAST_1 America region for AWS Configuration
UsEast1 = "US_EAST_1"
// UsEast2 represents the US_EAST_2 America region for AWS Configuration
UsEast2 = "US_EAST_2"
// UsWest1 represents the US_WEST_1 America region for AWS Configuration
UsWest1 = "US_WEST_1"
// UsWest2 represents the US_WEST_2 America region for AWS Configuration
UsWest2 = "US_WEST_2"
// SaEast1 represents the SA_EAST_1 America region for AWS Configuration
SaEast1 = "SA_EAST_1"
// ApNortheast1 represents the AP_NORTHEAST_1 Asia region for AWS Configuration
ApNortheast1 = "AP_NORTHEAST_1"
// ApNortheast2 represents the AP_NORTHEAST_2 Asia region for AWS Configuration
ApNortheast2 = "AP_NORTHEAST_2"
// ApSouth1 represents the AP_SOUTH_1 Asia region for AWS Configuration
ApSouth1 = "AP_SOUTH_1"
// ApSoutheast1 represents the AP_SOUTHEAST_1 Asia region for AWS Configuration
ApSoutheast1 = "AP_SOUTHEAST_1"
// ApSoutheast2 represents the AP_SOUTHEAST_2 Asia region for AWS Configuration
ApSoutheast2 = "AP_SOUTHEAST_2"
// EuCentral1 represents the EU_CENTRAL_1 Europe region for AWS Configuration
EuCentral1 = "EU_CENTRAL_1"
// EuWest1 represents the EU_WEST_1 Europe region for AWS Configuration
EuWest1 = "EU_WEST_1"
// EuWest2 represents the EU_WEST_2 Europe region for AWS Configuration
EuWest2 = "EU_WEST_2"
// EuWest3 represents the EU_WEST_3 Europe region for AWS Configuration
EuWest3 = "EU_WEST_3"
// Azure represents the `AZURE` environment where the Azure account credentials reside
Azure = "AZURE"
// AzureChina represents the `AZURE_CHINA` environment where the Azure account credentials reside
AzureChina = "AZURE_CHINA"
// AzureGermany represents the `AZURE_GERMANY` environment where the Azure account credentials reside
AzureGermany = "AZURE_GERMANY"
encryptionsAtRestBasePath = "groups/%s/encryptionAtRest"
)
// EncryptionsAtRestService is an interface for interfacing with the Encryption at Rest
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/encryption-at-rest/
type EncryptionsAtRestService interface {
Create(context.Context, *EncryptionAtRest) (*EncryptionAtRest, *Response, error)
Get(context.Context, string) (*EncryptionAtRest, *Response, error)
Delete(context.Context, string) (*Response, error)
}
//EncryptionsAtRestServiceOp handles communication with the Encryption at Rest related methods of the
//MongoDB Atlas API
type EncryptionsAtRestServiceOp struct {
client *Client
}
var _ EncryptionsAtRestService = &EncryptionsAtRestServiceOp{}
// EncryptionAtRest represents a configuration Encryption at Rest for an Atlas project.
type EncryptionAtRest struct {
GroupID string `json:"groupId,omitempty"` // The unique identifier for the project.
AwsKms `json:"awsKms,omitempty"` // AwsKms specifies AWS KMS configuration details and whether Encryption at Rest is enabled for an Atlas project.
AzureKeyVault `json:"azureKeyVault,omitempty"` // AzureKeyVault specifies Azure Key Vault configuration details and whether Encryption at Rest is enabled for an Atlas project.
GoogleCloudKms `json:"googleCloudKms,omitempty"` // Specifies GCP KMS configuration details and whether Encryption at Rest is enabled for an Atlas project.
}
// AwsKms specifies AWS KMS configuration details and whether Encryption at Rest is enabled for an Atlas project.
type AwsKms struct {
Enabled *bool `json:"enabled,omitempty"` // Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
AccessKeyID string `json:"accessKeyID,omitempty"` // The IAM access key ID with permissions to access the customer master key specified by customerMasterKeyID.
SecretAccessKey string `json:"secretAccessKey,omitempty"` // The IAM secret access key with permissions to access the customer master key specified by customerMasterKeyID.
CustomerMasterKeyID string `json:"customerMasterKeyID,omitempty"` // The AWS customer master key used to encrypt and decrypt the MongoDB master keys.
Region string `json:"region,omitempty"` // The AWS region in which the AWS customer master key exists: CA_CENTRAL_1, US_EAST_1, US_EAST_2, US_WEST_1, US_WEST_2, SA_EAST_1
}
// AzureKeyVault specifies Azure Key Vault configuration details and whether Encryption at Rest is enabled for an Atlas project.
type AzureKeyVault struct {
Enabled *bool `json:"enabled,omitempty"` // Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
ClientID string `json:"clientID,omitempty"` // The client ID, also known as the application ID, for an Azure application associated with the Azure AD tenant.
AzureEnvironment string `json:"azureEnvironment,omitempty"` // The Azure environment where the Azure account credentials reside. Valid values are the following: AZURE, AZURE_CHINA, AZURE_GERMANY
SubscriptionID string `json:"subscriptionID,omitempty"` // The unique identifier associated with an Azure subscription.
ResourceGroupName string `json:"resourceGroupName,omitempty"` // The name of the Azure Resource group that contains an Azure Key Vault.
KeyVaultName string `json:"keyVaultName,omitempty"` // The name of an Azure Key Vault containing your key.
KeyIdentifier string `json:"keyIdentifier,omitempty"` // The unique identifier of a key in an Azure Key Vault.
Secret string `json:"secret,omitempty"` // The secret associated with the Azure Key Vault specified by azureKeyVault.tenantID.
TenantID string `json:"tenantID,omitempty"` // The unique identifier for an Azure AD tenant within an Azure subscription.
}
// GoogleCloudKms specifies GCP KMS configuration details and whether Encryption at Rest is enabled for an Atlas project.
type GoogleCloudKms struct {
Enabled *bool `json:"enabled,omitempty"` // Specifies whether Encryption at Rest is enabled for an Atlas project. To disable Encryption at Rest, pass only this parameter with a value of false. When you disable Encryption at Rest, Atlas also removes the configuration details.
ServiceAccountKey string `json:"serviceAccountKey,omitempty"` // String-formatted JSON object containing GCP KMS credentials from your GCP account.
KeyVersionResourceID string `json:"keyVersionResourceID,omitempty"` // The Key Version Resource ID from your GCP account.
}
//Create enables and configures Encryption at Rest for an Atlas project with the specified provider settings.
//See more: https://docs.atlas.mongodb.com/reference/api/enable-configure-encryptionatrest/
func (s *EncryptionsAtRestServiceOp) Create(ctx context.Context, createRequest *EncryptionAtRest) (*EncryptionAtRest, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
if createRequest.GroupID == "" {
return nil, nil, NewArgError("groupId", "must be set")
}
path := fmt.Sprintf(encryptionsAtRestBasePath, createRequest.GroupID)
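// GroupID is only needed to build the request path; clear it so it is omitted from the request body.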
createRequest.GroupID = ""
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(EncryptionAtRest)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Get retrieves the current configuration for Encryption at Rest for an Atlas project.
//See more: https://docs.atlas.mongodb.com/reference/api/get-configuration-encryptionatrest/
func (s *EncryptionsAtRestServiceOp) Get(ctx context.Context, groupID string) (*EncryptionAtRest, *Response, error) {
if groupID == "" {
return nil, nil, NewArgError("groupId", "must be set")
}
path := fmt.Sprintf(encryptionsAtRestBasePath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(EncryptionAtRest)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Delete disables AWS, Azure, and Google Cloud Encryption at Rest for the project.
// See more: https://docs.atlas.mongodb.com/reference/api/enable-configure-encryptionatrest/
func (s *EncryptionsAtRestServiceOp) Delete(ctx context.Context, groupID string) (*Response, error) {
if groupID == "" {
return nil, NewArgError("groupId", "must be set")
}
createRequest := EncryptionAtRest{
AwsKms: AwsKms{Enabled: pointy.Bool(false)},
AzureKeyVault: AzureKeyVault{Enabled: pointy.Bool(false)},
GoogleCloudKms: GoogleCloudKms{Enabled: pointy.Bool(false)},
}
path := fmt.Sprintf(encryptionsAtRestBasePath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, createRequest)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
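// exampleEnableAwsKmsEncryptionAtRest is an illustrative sketch and not part
// of the upstream client: it enables Encryption at Rest for a project with AWS
// KMS using the service above. The project ID, IAM credentials and customer
// master key ID are hypothetical placeholders.
func exampleEnableAwsKmsEncryptionAtRest(ctx context.Context, c *Client) (*EncryptionAtRest, error) {
	svc := &EncryptionsAtRestServiceOp{client: c}
	createRequest := &EncryptionAtRest{
		GroupID: "5d1234567890abcdef012345",
		AwsKms: AwsKms{
			Enabled:             pointy.Bool(true),
			AccessKeyID:         "AKIAEXAMPLEACCESSKEY",
			SecretAccessKey:     "exampleSecretAccessKey",
			CustomerMasterKeyID: "012345ab-cdef-0123-4567-89abcdef0123",
			Region:              UsEast1,
		},
	}
	created, _, err := svc.Create(ctx, createRequest)
	return created, err
}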

View File

@@ -0,0 +1,24 @@
package mongodbatlas
import "fmt"
// ArgError is an error that represents an error with an input to this library. It
// identifies the argument and the cause (if possible).
type ArgError struct {
arg string
reason string
}
var _ error = &ArgError{}
// NewArgError creates an ArgError.
func NewArgError(arg, reason string) *ArgError {
return &ArgError{
arg: arg,
reason: reason,
}
}
func (e *ArgError) Error() string {
return fmt.Sprintf("%s is invalid because %s", e.arg, e.reason)
}

View File

@@ -0,0 +1,167 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const globalClustersBasePath = "groups/%s/clusters/%s/globalWrites/%s"
//GlobalClustersService is an interface for interfacing with the Global Clusters
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/global-clusters/
type GlobalClustersService interface {
Get(context.Context, string, string) (*GlobalCluster, *Response, error)
AddManagedNamespace(context.Context, string, string, *ManagedNamespace) (*GlobalCluster, *Response, error)
DeleteManagedNamespace(context.Context, string, string, *ManagedNamespace) (*GlobalCluster, *Response, error)
AddCustomZoneMappings(context.Context, string, string, *CustomZoneMappingsRequest) (*GlobalCluster, *Response, error)
DeleteCustomZoneMappings(context.Context, string, string) (*GlobalCluster, *Response, error)
}
//GlobalClustersServiceOp handles communication with the GlobalClusters related methods of the
//MongoDB Atlas API
type GlobalClustersServiceOp struct {
client *Client
}
var _ GlobalClustersService = &GlobalClustersServiceOp{}
// GlobalCluster represents MongoDB Global Cluster Configuration in your Global Cluster.
type GlobalCluster struct {
CustomZoneMapping map[string]string `json:"customZoneMapping"`
ManagedNamespaces []ManagedNamespace `json:"managedNamespaces"`
}
//ManagedNamespace represents the information about managed namespace configuration.
type ManagedNamespace struct {
Db string `json:"db"`
Collection string `json:"collection"`
CustomShardKey string `json:"customShardKey,omitempty"`
}
//CustomZoneMappingsRequest represents the request related to add custom zone mappings to a global cluster.
type CustomZoneMappingsRequest struct {
CustomZoneMappings []CustomZoneMapping `json:"customZoneMappings"`
}
//CustomZoneMapping represents the custom zone mapping.
type CustomZoneMapping struct {
Location string `json:"location"`
Zone string `json:"zone"`
}
//Get retrieves all managed namespaces and custom zone mappings associated with the specified Global Cluster.
//See more: https://docs.atlas.mongodb.com/reference/api/global-clusters-retrieve-namespaces/
func (s *GlobalClustersServiceOp) Get(ctx context.Context, groupID string, clusterName string) (*GlobalCluster, *Response, error) {
if clusterName == "" {
return nil, nil, NewArgError("username", "must be set")
}
path := fmt.Sprintf("groups/%s/clusters/%s/globalWrites", groupID, clusterName)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(GlobalCluster)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//AddManagedNamespace adds a managed namespace to the specified Global Cluster.
//See more: https://docs.atlas.mongodb.com/reference/api/global-clusters-add-namespace/
func (s *GlobalClustersServiceOp) AddManagedNamespace(ctx context.Context, groupID string, clusterName string, createRequest *ManagedNamespace) (*GlobalCluster, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(globalClustersBasePath, groupID, clusterName, "managedNamespaces")
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(GlobalCluster)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//DeleteManagedNamespace deletes the managed namespace configuration of the global cluster given.
//See more: https://docs.atlas.mongodb.com/reference/api/global-clusters-delete-namespace/
func (s *GlobalClustersServiceOp) DeleteManagedNamespace(ctx context.Context, groupID string, clusterName string, deleteRequest *ManagedNamespace) (*GlobalCluster, *Response, error) {
if deleteRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(globalClustersBasePath, groupID, clusterName, "managedNamespaces")
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, nil, err
}
q := req.URL.Query()
q.Add("collection", deleteRequest.Collection)
q.Add("db", deleteRequest.Db)
req.URL.RawQuery = q.Encode()
root := new(GlobalCluster)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//AddCustomZoneMappings adds an entry to the list of custom zone mappings for the specified Global Cluster.
//See more: https://docs.atlas.mongodb.com/reference/api/global-clusters-add-customzonemapping/
func (s *GlobalClustersServiceOp) AddCustomZoneMappings(ctx context.Context, groupID string, clusterName string, createRequest *CustomZoneMappingsRequest) (*GlobalCluster, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(globalClustersBasePath, groupID, clusterName, "customZoneMapping")
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(GlobalCluster)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//DeleteCustomZoneMappings removes all custom zone mappings from the specified Global Cluster.
//See more: https://docs.atlas.mongodb.com/reference/api/global-clusters-delete-namespace/
func (s *GlobalClustersServiceOp) DeleteCustomZoneMappings(ctx context.Context, groupID string, clusterName string) (*GlobalCluster, *Response, error) {
path := fmt.Sprintf(globalClustersBasePath, groupID, clusterName, "customZoneMapping")
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, nil, err
}
root := new(GlobalCluster)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
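// exampleConfigureGlobalCluster is an illustrative sketch and not part of the
// upstream client: it adds a managed namespace and a custom zone mapping to a
// Global Cluster using the service above. The project ID, cluster name,
// namespace and zone values are hypothetical.
func exampleConfigureGlobalCluster(ctx context.Context, c *Client) (*GlobalCluster, error) {
	svc := &GlobalClustersServiceOp{client: c}
	groupID := "5d1234567890abcdef012345"
	clusterName := "GlobalCluster0"
	namespace := &ManagedNamespace{
		Db:             "sales",
		Collection:     "orders",
		CustomShardKey: "region",
	}
	if _, _, err := svc.AddManagedNamespace(ctx, groupID, clusterName, namespace); err != nil {
		return nil, err
	}
	mappings := &CustomZoneMappingsRequest{
		CustomZoneMappings: []CustomZoneMapping{
			{Location: "US", Zone: "Zone 1"},
		},
	}
	cluster, _, err := svc.AddCustomZoneMappings(ctx, groupID, clusterName, mappings)
	return cluster, err
}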

View File

@@ -0,0 +1,21 @@
package mongodbatlas
import "net/url"
//Link is the link to sub-resources and/or related resources.
type Link struct {
Rel string `json:"rel,omitempty"`
Href string `json:"href,omitempty"`
}
func (l *Link) getHrefURL() (*url.URL, error) {
return url.Parse(l.Href)
}
func (l *Link) getHrefQueryParam(param string) (string, error) {
hrefURL, err := l.getHrefURL()
if err != nil {
return "", err
}
return hrefURL.Query().Get(param), nil
}
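// exampleNextPageNum is an illustrative sketch and not part of the upstream
// client: it shows how the unexported helpers above can read a query
// parameter (here a hypothetical "pageNum") out of a pagination link.
func exampleNextPageNum(links []*Link) (string, error) {
	for _, l := range links {
		if l.Rel == "next" {
			return l.getHrefQueryParam("pageNum")
		}
	}
	return "", nil
}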

View File

@@ -0,0 +1,136 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const (
maintenanceWindowsPath = "groups/%s/maintenanceWindow"
// Sunday is the day of the week value to start the maintenance window on Sunday
Sunday = 1
// Monday is the day of the week value to start the maintenance window on Monday
Monday = 2
// Tuesday is the day of the week value to start the maintenance window on Tuesday
Tuesday = 3
// Wednesday is the day of the week value to start the maintenance window on Wednesday
Wednesday = 4
// Thursday is the day of the week value to start the maintenance window on Thursday
Thursday = 5
// Friday is the day of the week value to start the maintenance window on Friday
Friday = 6
// Saturday is the day of the week value to start the maintenance window on Saturday
Saturday = 7
)
// MaintenanceWindowsService is an interface for interfacing with the Maintenance Windows
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/maintenance-windows/
type MaintenanceWindowsService interface {
Get(context.Context, string) (*MaintenanceWindow, *Response, error)
Update(context.Context, string, *MaintenanceWindow) (*Response, error)
Defer(context.Context, string) (*Response, error)
Reset(context.Context, string) (*Response, error)
}
// MaintenanceWindowsServiceOp handles communication with the MaintenanceWindows related methods
// of the MongoDB Atlas API
type MaintenanceWindowsServiceOp struct {
client *Client
}
var _ MaintenanceWindowsService = &MaintenanceWindowsServiceOp{}
// MaintenanceWindow represents MongoDB Maintenance Windows
type MaintenanceWindow struct {
DayOfWeek int `json:"dayOfWeek,omitempty"` // Day of the week when you would like the maintenance window to start, as a 1-based integer: Sunday 1, Monday 2, Tuesday 3, Wednesday 4, Thursday 5, Friday 6, Saturday 7
HourOfDay *int `json:"hourOfDay,omitempty"` // Hour of the day when you would like the maintenance window to start. This parameter uses the 24-hour clock, where midnight is 0, noon is 12.
StartASAP *bool `json:"startASAP,omitempty"` // Flag indicating whether project maintenance has been directed to start immediately.
NumberOfDeferrals int `json:"numberOfDeferrals,omitempty"` // Number of times the current maintenance event for this project has been deferred.
}
// Get gets the current user-defined maintenance window for the given project.
// See more: https://docs.atlas.mongodb.com/reference/api/maintenance-windows-view-in-one-project/
func (s *MaintenanceWindowsServiceOp) Get(ctx context.Context, groupID string) (*MaintenanceWindow, *Response, error) {
if groupID == "" {
return nil, nil, NewArgError("groupID", "must be set")
}
path := fmt.Sprintf(maintenanceWindowsPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(MaintenanceWindow)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Update the current maintenance window for the given project.
// See more: https://docs.atlas.mongodb.com/reference/api/maintenance-window-update/
func (s *MaintenanceWindowsServiceOp) Update(ctx context.Context, groupID string, updateRequest *MaintenanceWindow) (*Response, error) {
if updateRequest == nil {
return nil, NewArgError("updateRequest", "cannot be nil")
}
if groupID == "" {
return nil, NewArgError("groupID", "cannot be nil")
}
path := fmt.Sprintf(maintenanceWindowsPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, err
}
// Defer maintenance for the given project for one week.
// See more: https://docs.atlas.mongodb.com/reference/api/maintenance-window-defer/
func (s *MaintenanceWindowsServiceOp) Defer(ctx context.Context, groupID string) (*Response, error) {
if groupID == "" {
return nil, NewArgError("groupID", "cannot be nil")
}
path := fmt.Sprintf(maintenanceWindowsPath+"/defer", groupID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
// Reset clears the current maintenance window for the given project.
// See more: https://docs.atlas.mongodb.com/reference/api/maintenance-window-clear/
func (s *MaintenanceWindowsServiceOp) Reset(ctx context.Context, groupID string) (*Response, error) {
if groupID == "" {
return nil, NewArgError("groupID", "must be set")
}
path := fmt.Sprintf(maintenanceWindowsPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
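
A short sketch of setting a maintenance window with this service, using the day-of-week constants above and the vendored pointy helpers for the *int field. The import path and project ID are assumptions for illustration.

```golang
package main

import (
	"context"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
	"github.com/mwielbut/pointy"
)

func main() {
	// Stand-in client; real use needs an authenticated *http.Client.
	client := mongodbatlas.NewClient(http.DefaultClient)

	window := &mongodbatlas.MaintenanceWindow{
		DayOfWeek: mongodbatlas.Saturday, // 1-based day constants declared above
		HourOfDay: pointy.Int(2),         // 02:00 on the 24-hour clock
	}

	// "<GROUP-ID>" is a placeholder project ID.
	if _, err := client.MaintenanceWindows.Update(context.Background(), "<GROUP-ID>", window); err != nil {
		log.Fatal(err)
	}
}
```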

View File

@@ -0,0 +1,346 @@
package mongodbatlas
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"reflect"
"strconv"
"github.com/google/go-querystring/query"
)
const (
libraryVersion = "0.1"
defaultBaseURL = "https://cloud.mongodb.com/api/atlas/v1.0/"
userAgent = "go-mongodbatlas" + libraryVersion
mediaType = "application/json"
)
// Client manages communication with MongoDBAtlas v1.0 API
type Client struct {
client *http.Client
BaseURL *url.URL
UserAgent string
//Services used for communicating with the API
CustomDBRoles CustomDBRolesService
DatabaseUsers DatabaseUsersService
ProjectIPWhitelist ProjectIPWhitelistService
Projects ProjectsService
Clusters ClustersService
CloudProviderSnapshots CloudProviderSnapshotsService
APIKeys APIKeysService
ProjectAPIKeys ProjectAPIKeysService
CloudProviderSnapshotRestoreJobs CloudProviderSnapshotRestoreJobsService
Peers PeersService
Containers ContainersService
EncryptionsAtRest EncryptionsAtRestService
WhitelistAPIKeys WhitelistAPIKeysService
PrivateIPMode PrivateIpModeService
MaintenanceWindows MaintenanceWindowsService
Teams TeamsService
AtlasUsers AtlasUsersService
GlobalClusters GlobalClustersService
Auditing AuditingsService
onRequestCompleted RequestCompletionCallback
}
// RequestCompletionCallback defines the type of the request callback function
type RequestCompletionCallback func(*http.Request, *http.Response)
// Response is a MongoDBAtlas response. This wraps the standard http.Response returned from MongoDBAtlas API.
type Response struct {
*http.Response
// Links that were returned with the response.
Links []*Link `json:"links"`
}
// ListOptions specifies the optional parameters to List methods that
// support pagination.
type ListOptions struct {
// For paginated result sets, page of results to retrieve.
PageNum int `url:"pageNum,omitempty"`
// For paginated result sets, the number of results to include per page.
ItemsPerPage int `url:"itemsPerPage,omitempty"`
}
//ErrorResponse reports the error caused by an API request.
type ErrorResponse struct {
// HTTP response that caused this error
Response *http.Response
//The error code, which is simply the HTTP status code.
ErrorCode int `json:"Error"`
//A short description of the error, which is simply the HTTP status phrase.
Reason string `json:"reason"`
//A more detailed description of the error.
Detail string `json:"detail,omitempty"`
}
func (resp *Response) getCurrentPageLink() (*Link, error) {
if link := resp.getLinkByRef("self"); link != nil {
return link, nil
}
return nil, errors.New("no self link found")
}
func (resp *Response) getLinkByRef(ref string) *Link {
for i := range resp.Links {
if resp.Links[i].Rel == ref {
return resp.Links[i]
}
}
return nil
}
//IsLastPage returns true if the current page is the last page
func (resp *Response) IsLastPage() bool {
return resp.getLinkByRef("next") == nil
}
//CurrentPage gets the current page for list pagination request.
func (resp *Response) CurrentPage() (int, error) {
link, err := resp.getCurrentPageLink()
if err != nil {
return 0, err
}
pageNumStr, err := link.getHrefQueryParam("pageNum")
if err != nil {
return 0, err
}
pageNum, err := strconv.Atoi(pageNumStr)
if err != nil {
return 0, fmt.Errorf("error getting current page: %s", err)
}
return pageNum, nil
}
// NewClient returns a new MongoDBAtlas API Client
func NewClient(httpClient *http.Client) *Client {
if httpClient == nil {
httpClient = http.DefaultClient
}
baseURL, _ := url.Parse(defaultBaseURL)
c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}
c.APIKeys = &APIKeysServiceOp{client: c}
c.CloudProviderSnapshots = &CloudProviderSnapshotsServiceOp{client: c}
c.CloudProviderSnapshotRestoreJobs = &CloudProviderSnapshotRestoreJobsServiceOp{client: c}
c.Clusters = &ClustersServiceOp{client: c}
c.Containers = &ContainersServiceOp{client: c}
c.CustomDBRoles = &CustomDBRolesServiceOp{client: c}
c.DatabaseUsers = &DatabaseUsersServiceOp{client: c}
c.EncryptionsAtRest = &EncryptionsAtRestServiceOp{client: c}
c.Projects = &ProjectsServiceOp{client: c}
c.ProjectAPIKeys = &ProjectAPIKeysOp{client: c}
c.Peers = &PeersServiceOp{client: c}
c.ProjectIPWhitelist = &ProjectIPWhitelistServiceOp{client: c}
c.WhitelistAPIKeys = &WhitelistAPIKeysServiceOp{client: c}
c.PrivateIPMode = &PrivateIpModeServiceOp{client: c}
c.MaintenanceWindows = &MaintenanceWindowsServiceOp{client: c}
c.Teams = &TeamsServiceOp{client: c}
c.AtlasUsers = &AtlasUsersServiceOp{client: c}
c.GlobalClusters = &GlobalClustersServiceOp{client: c}
c.Auditing = &AuditingsServiceOp{client: c}
return c
}
// ClientOpt are options for New.
type ClientOpt func(*Client) error
// New returns a new MongoDBAtlas API client instance.
func New(httpClient *http.Client, opts ...ClientOpt) (*Client, error) {
c := NewClient(httpClient)
for _, opt := range opts {
if err := opt(c); err != nil {
return nil, err
}
}
return c, nil
}
// SetBaseURL is a client option for setting the base URL.
func SetBaseURL(bu string) ClientOpt {
return func(c *Client) error {
u, err := url.Parse(bu)
if err != nil {
return err
}
c.BaseURL = u
return nil
}
}
// SetUserAgent is a client option for setting the user agent.
func SetUserAgent(ua string) ClientOpt {
return func(c *Client) error {
c.UserAgent = fmt.Sprintf("%s %s", ua, c.UserAgent)
return nil
}
}
// NewRequest creates an API request. A relative URL can be provided in urlStr, which will be resolved to the
// BaseURL of the Client. Relative URLs should always be specified without a preceding slash. If specified, the
// value pointed to by body is JSON encoded and included as the request body.
func (c *Client) NewRequest(ctx context.Context, method, urlStr string, body interface{}) (*http.Request, error) {
rel, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
u := c.BaseURL.ResolveReference(rel)
buf := new(bytes.Buffer)
if body != nil {
err = json.NewEncoder(buf).Encode(body)
if err != nil {
return nil, err
}
}
req, err := http.NewRequest(method, u.String(), buf)
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", mediaType)
req.Header.Add("Accept", mediaType)
req.Header.Add("User-Agent", c.UserAgent)
return req, nil
}
// OnRequestCompleted sets the MongoDB Atlas API request completion callback
func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) {
c.onRequestCompleted = rc
}
// Do sends an API request and returns the API response. The API response is JSON decoded and stored in the value
// pointed to by v, or returned as an error if an API error has occurred. If v implements the io.Writer interface,
// the raw response will be written to v, without attempting to decode it.
func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) {
resp, err := DoRequestWithClient(ctx, c.client, req)
if err != nil {
return nil, err
}
if c.onRequestCompleted != nil {
c.onRequestCompleted(req, resp)
}
defer func() {
if rerr := resp.Body.Close(); err == nil {
err = rerr
}
}()
response := newResponse(resp)
err = CheckResponse(resp)
if err != nil {
return response, err
}
if v != nil {
if w, ok := v.(io.Writer); ok {
_, err = io.Copy(w, resp.Body)
if err != nil {
return nil, err
}
} else {
err = json.NewDecoder(resp.Body).Decode(v)
if err != nil {
return nil, err
}
}
}
return response, err
}
func (r *ErrorResponse) Error() string {
return fmt.Sprintf("%v %v: %d (request %q) %v",
r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.Reason, r.Detail)
}
// CheckResponse checks the API response for errors, and returns them if present. A response is considered an
// error if it has a status code outside the 200 range. API error responses are expected to have either no response
// body, or a JSON response body that maps to ErrorResponse. Any other response body will be silently ignored.
func CheckResponse(r *http.Response) error {
if c := r.StatusCode; c >= 200 && c <= 299 {
return nil
}
errorResponse := &ErrorResponse{Response: r}
data, err := ioutil.ReadAll(r.Body)
if err == nil && len(data) > 0 {
err := json.Unmarshal(data, errorResponse)
if err != nil {
log.Printf("[DEBUG] unmarshal error response: %s", err)
errorResponse.Reason = string(data)
}
}
return errorResponse
}
// newResponse creates a new Response for the provided http.Response
func newResponse(r *http.Response) *Response {
response := Response{Response: r}
return &response
}
// DoRequestWithClient submits an HTTP request using the specified client.
func DoRequestWithClient(
ctx context.Context,
client *http.Client,
req *http.Request) (*http.Response, error) {
req = req.WithContext(ctx)
return client.Do(req)
}
func setListOptions(s string, opt interface{}) (string, error) {
v := reflect.ValueOf(opt)
if v.Kind() == reflect.Ptr && v.IsNil() {
return s, nil
}
origURL, err := url.Parse(s)
if err != nil {
return s, err
}
origValues := origURL.Query()
newValues, err := query.Values(opt)
if err != nil {
return s, err
}
for k, v := range newValues {
origValues[k] = v
}
origURL.RawQuery = origValues.Encode()
return origURL.String(), nil
}
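
A minimal sketch of constructing the client with the functional options defined above and inspecting an API error. The import path is an assumption, and authentication (typically an http.Client wrapping Atlas credentials) is configured outside this package and not shown.

```golang
package main

import (
	"context"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

func main() {
	// New applies functional options on top of NewClient's defaults.
	client, err := mongodbatlas.New(
		http.DefaultClient, // stand-in; real use needs an authenticated client
		mongodbatlas.SetBaseURL("https://cloud.mongodb.com/api/atlas/v1.0/"),
		mongodbatlas.SetUserAgent("vault-demo"),
	)
	if err != nil {
		log.Fatal(err)
	}

	_, _, err = client.Projects.GetAllProjects(context.Background())
	if apiErr, ok := err.(*mongodbatlas.ErrorResponse); ok {
		// Non-2xx responses decode into ErrorResponse (Reason, Detail, ErrorCode).
		log.Fatalf("atlas API error: %s", apiErr.Detail)
	}
	if err != nil {
		log.Fatal(err)
	}
}
```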

View File

@@ -0,0 +1,182 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
"net/url"
)
const peersPath = "groups/%s/peers"
//PeersService is an interface for interfacing with the Peers
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/peers/
type PeersService interface {
List(context.Context, string, *ListOptions) ([]Peer, *Response, error)
Get(context.Context, string, string) (*Peer, *Response, error)
Create(context.Context, string, *Peer) (*Peer, *Response, error)
Update(context.Context, string, string, *Peer) (*Peer, *Response, error)
Delete(context.Context, string, string) (*Response, error)
}
//PeersServiceOp handles communication with the Network Peering Connection related methods
// of the MongoDB Atlas API
type PeersServiceOp struct {
client *Client
}
var _ PeersService = &PeersServiceOp{}
// Peer represents MongoDB peer connection.
type Peer struct {
AccepterRegionName string `json:"accepterRegionName,omitempty"`
AWSAccountId string `json:"awsAccountId,omitempty"`
ConnectionID string `json:"connectionId,omitempty"`
ContainerID string `json:"containerId,omitempty"`
ErrorStateName string `json:"errorStateName,omitempty"`
ID string `json:"id,omitempty"`
ProviderName string `json:"providerName,omitempty"`
RouteTableCIDRBlock string `json:"routeTableCidrBlock,omitempty"`
StatusName string `json:"statusName,omitempty"`
VpcID string `json:"vpcId,omitempty"`
AtlasCIDRBlock string `json:"atlasCidrBlock,omitempty"`
AzureDirectoryID string `json:"azureDirectoryId,omitempty"`
AzureSubscriptionId string `json:"azureSubscriptionId,omitempty"`
ResourceGroupName string `json:"resourceGroupName,omitempty"`
VNetName string `json:"vnetName,omitempty"`
ErrorState string `json:"errorState,omitempty"`
Status string `json:"status,omitempty"`
GCPProjectID string `json:"gcpProjectId,omitempty"`
NetworkName string `json:"networkName,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
}
// peersResponse is the response from the PeersService.List.
type peersResponse struct {
Links []*Link `json:"links,omitempty"`
Results []Peer `json:"results,omitempty"`
TotalCount int `json:"totalCount,omitempty"`
}
//List all peers in the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/vpc-get-connections-list/
func (s *PeersServiceOp) List(ctx context.Context, groupID string, listOptions *ListOptions) ([]Peer, *Response, error) {
path := fmt.Sprintf(peersPath, groupID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(peersResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Get gets the network peering connection specified to {PEER-ID} from the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/vpc-get-connection/
func (s *PeersServiceOp) Get(ctx context.Context, groupID string, peerID string) (*Peer, *Response, error) {
if peerID == "" {
return nil, nil, NewArgError("perrID", "must be set")
}
basePath := fmt.Sprintf(peersPath, groupID)
escapedEntry := url.PathEscape(peerID)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Peer)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create adds a peer connection to the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/vpc-create-peering-connection/
func (s *PeersServiceOp) Create(ctx context.Context, groupID string, createRequest *Peer) (*Peer, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(peersPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(Peer)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Update a peer connection in the project associated to {GROUP-ID}
//See more: https://docs.atlas.mongodb.com/reference/api/vpc-update-peering-connection/
func (s *PeersServiceOp) Update(ctx context.Context, groupID string, peerID string, updateRequest *Peer) (*Peer, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
basePath := fmt.Sprintf(peersPath, groupID)
path := fmt.Sprintf("%s/%s", basePath, peerID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(Peer)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Delete the peer connection specified to {PEER-ID} from the project associated to {GROUP-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/vpc-delete-peering-connection/
func (s *PeersServiceOp) Delete(ctx context.Context, groupID string, peerID string) (*Response, error) {
if peerID == "" {
return nil, NewArgError("peerID", "must be set")
}
basePath := fmt.Sprintf(peersPath, groupID)
escapedEntry := url.PathEscape(peerID)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
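
A sketch of paging through peering connections with ListOptions and the Response.IsLastPage helper from the client file above. The import path and project ID are placeholders assumed for illustration.

```golang
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

func main() {
	// Stand-in client; real use needs an authenticated *http.Client.
	client := mongodbatlas.NewClient(http.DefaultClient)
	ctx := context.Background()

	// Walk every page of peering connections for a placeholder project.
	opts := &mongodbatlas.ListOptions{PageNum: 1, ItemsPerPage: 100}
	for {
		peers, resp, err := client.Peers.List(ctx, "<GROUP-ID>", opts)
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range peers {
			fmt.Println(p.ID, p.StatusName)
		}
		if resp.IsLastPage() {
			break
		}
		opts.PageNum++
	}
}
```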

View File

@@ -0,0 +1,72 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const privateIpModePath = "groups/%s/privateIpMode"
//PrivateIpModeService is an interface for interfacing with the PrivateIpMode
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/get-private-ip-mode-for-project/
type PrivateIpModeService interface {
Get(context.Context, string) (*PrivateIPMode, *Response, error)
Update(context.Context, string, *PrivateIPMode) (*PrivateIPMode, *Response, error)
}
//PrivateIpModeServiceOp handles communication with the Private IP Mode related methods
// of the MongoDB Atlas API
type PrivateIpModeServiceOp struct {
client *Client
}
var _ PrivateIpModeService = &PrivateIpModeServiceOp{}
// PrivateIPMode represents MongoDB Private IP Mode Configuration.
type PrivateIPMode struct {
Enabled *bool `json:"enabled,omitempty"`
}
//Get verifies Connect via Peering Only Mode for the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/get-private-ip-mode-for-project/
func (s *PrivateIpModeServiceOp) Get(ctx context.Context, groupID string) (*PrivateIPMode, *Response, error) {
path := fmt.Sprintf(privateIpModePath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(PrivateIPMode)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Update enables or disables Connect via Peering Only Mode for the project associated to {GROUP-ID}
//See more: https://docs.atlas.mongodb.com/reference/api/set-private-ip-mode-for-project/
func (s *PrivateIpModeServiceOp) Update(ctx context.Context, groupID string, updateRequest *PrivateIPMode) (*PrivateIPMode, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
path := fmt.Sprintf(privateIpModePath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(PrivateIPMode)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
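
A brief sketch of toggling Connect via Peering Only Mode with this service, reusing the vendored pointy helpers for the *bool field. Import path and project ID are assumptions.

```golang
package main

import (
	"context"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
	"github.com/mwielbut/pointy"
)

func main() {
	// Stand-in client; real use needs an authenticated *http.Client.
	client := mongodbatlas.NewClient(http.DefaultClient)

	// Disable Connect via Peering Only Mode for a placeholder project.
	mode := &mongodbatlas.PrivateIPMode{Enabled: pointy.Bool(false)}
	updated, _, err := client.PrivateIPMode.Update(context.Background(), "<GROUP-ID>", mode)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("private IP mode enabled: %v", pointy.BoolValue(updated.Enabled, false))
}
```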

View File

@@ -0,0 +1,134 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const projectAPIKeysPath = "groups/%s/apiKeys"
//ProjectAPIKeysService is an interface for interfacing with the APIKeys
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/apiKeys/#organization-api-keys-on-projects-endpoints
type ProjectAPIKeysService interface {
List(context.Context, string, *ListOptions) ([]APIKey, *Response, error)
Create(context.Context, string, *APIKeyInput) (*APIKey, *Response, error)
Assign(context.Context, string, string, *AssignAPIKey) (*Response, error)
Unassign(context.Context, string, string) (*Response, error)
}
//ProjectAPIKeysOp handles communication with the APIKey related methods
// of the MongoDB Atlas API
type ProjectAPIKeysOp struct {
client *Client
}
var _ ProjectAPIKeysService = &ProjectAPIKeysOp{}
// AssignAPIKey contains the roles to be assigned to an Organization API key into a Project
type AssignAPIKey struct {
Roles []string `json:"roles"`
}
//List gets all API keys in the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/projectApiKeys/get-all-apiKeys-in-one-project/
func (s *ProjectAPIKeysOp) List(ctx context.Context, groupID string, listOptions *ListOptions) ([]APIKey, *Response, error) {
path := fmt.Sprintf(projectAPIKeysPath, groupID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(apiKeysResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Create creates an API key for the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/projectApiKeys/create-one-apiKey-in-one-project/
func (s *ProjectAPIKeysOp) Create(ctx context.Context, groupID string, createRequest *APIKeyInput) (*APIKey, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(projectAPIKeysPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(APIKey)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Assign assigns the organization API key specified by {API-KEY-ID} to the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/projectApiKeys/assign-one-org-apiKey-to-one-project/
func (s *ProjectAPIKeysOp) Assign(ctx context.Context, groupID string, keyID string, assignAPIKeyRequest *AssignAPIKey) (*Response, error) {
if groupID == "" {
return nil, NewArgError("groupID", "must be set")
}
if keyID == "" {
return nil, NewArgError("keyID", "must be set")
}
basePath := fmt.Sprintf(projectAPIKeysPath, groupID)
path := fmt.Sprintf("%s/%s", basePath, keyID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, assignAPIKeyRequest)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
//Unassign removes the organization API key specified by {API-KEY-ID} from the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/projectApiKeys/delete-one-apiKey-in-one-project/
func (s *ProjectAPIKeysOp) Unassign(ctx context.Context, groupID string, keyID string) (*Response, error) {
if groupID == "" {
return nil, NewArgError("apiKeyID", "must be set")
}
if keyID == "" {
return nil, NewArgError("keyID", "must be set")
}
basePath := fmt.Sprintf(projectAPIKeysPath, groupID)
path := fmt.Sprintf("%s/%s", basePath, keyID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
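
A sketch of assigning an existing organization API key to a project with this service, using one of the GROUP_* role constants declared in the projects file further down in this bundle. Import path and identifiers are placeholders.

```golang
package main

import (
	"context"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

func main() {
	// Stand-in client; real use needs an authenticated *http.Client.
	client := mongodbatlas.NewClient(http.DefaultClient)

	// Grant the key read-only access on the project.
	assign := &mongodbatlas.AssignAPIKey{
		Roles: []string{mongodbatlas.GROUP_READ_ONLY},
	}
	_, err := client.ProjectAPIKeys.Assign(context.Background(), "<GROUP-ID>", "<API-KEY-ID>", assign)
	if err != nil {
		log.Fatal(err)
	}
}
```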

View File

@@ -0,0 +1,175 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
"net/url"
)
const projectIPWhitelistPath = "groups/%s/whitelist"
//ProjectIPWhitelistService is an interface for interfacing with the Project IP Whitelist
// endpoints of the MongoDB Atlas API.
//See more: https://docs.atlas.mongodb.com/reference/api/whitelist/
type ProjectIPWhitelistService interface {
List(context.Context, string, *ListOptions) ([]ProjectIPWhitelist, *Response, error)
Get(context.Context, string, string) (*ProjectIPWhitelist, *Response, error)
Create(context.Context, string, []*ProjectIPWhitelist) ([]ProjectIPWhitelist, *Response, error)
Update(context.Context, string, string, []*ProjectIPWhitelist) ([]ProjectIPWhitelist, *Response, error)
Delete(context.Context, string, string) (*Response, error)
}
//ProjectIPWhitelistServiceOp handles communication with the ProjectIPWhitelist related methods
// of the MongoDB Atlas API
type ProjectIPWhitelistServiceOp struct {
client *Client
}
var _ ProjectIPWhitelistService = &ProjectIPWhitelistServiceOp{}
// ProjectIPWhitelist represents MongoDB project's IP whitelist.
type ProjectIPWhitelist struct {
Comment string `json:"comment,omitempty"`
GroupID string `json:"groupId,omitempty"`
CIDRBlock string `json:"cidrBlock,omitempty"`
IPAddress string `json:"ipAddress,omitempty"`
DeleteAfterDate string `json:"deleteAfterDate,omitempty"`
}
// projectIPWhitelistsResponse is the response from the ProjectIPWhitelistService.List.
type projectIPWhitelistsResponse struct {
Links []*Link `json:"links"`
Results []ProjectIPWhitelist `json:"results"`
TotalCount int `json:"totalCount"`
}
//List all whitelist entries in the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/whitelist-get-all/
func (s *ProjectIPWhitelistServiceOp) List(ctx context.Context, groupID string, listOptions *ListOptions) ([]ProjectIPWhitelist, *Response, error) {
path := fmt.Sprintf(projectIPWhitelistPath, groupID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(projectIPWhitelistsResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Get gets the whitelist entry specified to {WHITELIST-ENTRY} from the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/whitelist-get-one-entry/
func (s *ProjectIPWhitelistServiceOp) Get(ctx context.Context, groupID string, whiteListEntry string) (*ProjectIPWhitelist, *Response, error) {
if whiteListEntry == "" {
return nil, nil, NewArgError("whiteListEntry", "must be set")
}
basePath := fmt.Sprintf(projectIPWhitelistPath, groupID)
escapedEntry := url.PathEscape(whiteListEntry)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(ProjectIPWhitelist)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create adds one or more whitelist entries to the project associated to {GROUP-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/database-users-create-a-user/
func (s *ProjectIPWhitelistServiceOp) Create(ctx context.Context, groupID string, createRequest []*ProjectIPWhitelist) ([]ProjectIPWhitelist, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(projectIPWhitelistPath, groupID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(projectIPWhitelistsResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, err
}
//Update one or more whitelist entries in the project associated to {GROUP-ID}
//See more: https://docs.atlas.mongodb.com/reference/api/whitelist-update-one/
func (s *ProjectIPWhitelistServiceOp) Update(ctx context.Context, groupID string, whitelistEntry string, updateRequest []*ProjectIPWhitelist) ([]ProjectIPWhitelist, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
basePath := fmt.Sprintf(projectIPWhitelistPath, groupID)
path := fmt.Sprintf("%s/%s", basePath, whitelistEntry)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(projectIPWhitelistsResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, err
}
//Delete the whitelist entry specified to {WHITELIST-ENTRY} from the project associated to {GROUP-ID}.
// See more: https://docs.atlas.mongodb.com/reference/api/whitelist-delete-one/
func (s *ProjectIPWhitelistServiceOp) Delete(ctx context.Context, groupID string, whitelistEntry string) (*Response, error) {
if whitelistEntry == "" {
return nil, NewArgError("whitelistEntry", "must be set")
}
basePath := fmt.Sprintf(projectIPWhitelistPath, groupID)
escapedEntry := url.PathEscape(whitelistEntry)
path := fmt.Sprintf("%s/%s", basePath, escapedEntry)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
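
A sketch of creating whitelist entries in bulk with this service. The import path and project ID are placeholders, and the addresses use documentation-only ranges.

```golang
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

func main() {
	// Stand-in client; real use needs an authenticated *http.Client.
	client := mongodbatlas.NewClient(http.DefaultClient)

	// Whitelist a single address and a CIDR block in one call.
	entries := []*mongodbatlas.ProjectIPWhitelist{
		{IPAddress: "203.0.113.10", Comment: "bastion host"},
		{CIDRBlock: "198.51.100.0/24", Comment: "office network"},
	}
	created, _, err := client.ProjectIPWhitelist.Create(context.Background(), "<GROUP-ID>", entries)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range created {
		fmt.Println(e.IPAddress, e.CIDRBlock)
	}
}
```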

View File

@@ -0,0 +1,233 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const (
GROUP_OWNER = "GROUP_OWNER" //GROUP_OWNER - Project Owner
GROUP_READ_ONLY = "GROUP_READ_ONLY" //GROUP_READ_ONLY - Project Read Only
GROUP_DATA_ACCESS_ADMIN = "GROUP_DATA_ACCESS_ADMIN" //GROUP_DATA_ACCESS_ADMIN - Project Data Access Admin
GROUP_DATA_ACCESS_READ_WRITE = "GROUP_DATA_ACCESS_READ_WRITE" //GROUP_DATA_ACCESS_READ_WRITE - Project Data Access Read/Write
GROUP_DATA_ACCESS_READ_ONLY = "GROUP_DATA_ACCESS_READ_ONLY" //GROUP_DATA_ACCESS_READ_ONLY - Project Data Access Read Only
projectBasePath = "groups"
)
// ProjectsService is an interface for interfacing with the Projects
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/projects/
type ProjectsService interface {
GetAllProjects(context.Context) (*Projects, *Response, error)
GetOneProject(context.Context, string) (*Project, *Response, error)
GetOneProjectByName(context.Context, string) (*Project, *Response, error)
Create(context.Context, *Project) (*Project, *Response, error)
Delete(context.Context, string) (*Response, error)
GetProjectTeamsAssigned(context.Context, string) (*TeamsAssigned, *Response, error)
AddTeamsToProject(context.Context, string, *ProjectTeam) (*TeamsAssigned, *Response, error)
}
//ProjectsServiceOp handles communication with the Projects related methods of the
//MongoDB Atlas API
type ProjectsServiceOp struct {
client *Client
}
var _ ProjectsService = &ProjectsServiceOp{}
// Project represents the structure of a project.
type Project struct {
ID string `json:"id,omitempty"`
OrgID string `json:"orgId,omitempty"`
Name string `json:"name,omitempty"`
ClusterCount int `json:"clusterCount,omitempty"`
Created string `json:"created,omitempty"`
Links []*Link `json:"links,omitempty"`
}
// Projects represents an array of projects.
type Projects struct {
Links []*Link `json:"links"`
Results []*Project `json:"results"`
TotalCount int `json:"totalCount"`
}
// Result is part of the TeamsAssigned structure.
type Result struct {
Links []*Link `json:"links"`
RoleNames []string `json:"roleNames"`
TeamID string `json:"teamId"`
}
// RoleName represents the kind of user role in your project
type RoleName struct {
RoleName string `json:"rolesNames"`
}
// ProjectTeam represents a team and the project roles assigned to it.
type ProjectTeam struct {
TeamID string `json:"teamId"`
Roles []*RoleName `json:"roles"`
}
// TeamsAssigned represents the teams assigned to a project.
type TeamsAssigned struct {
Links []*Link `json:"links"`
Results []*Result `json:"results"`
TotalCount int `json:"totalCount"`
}
//GetAllProjects gets all projects.
//See more: https://docs.atlas.mongodb.com/reference/api/project-get-all/
func (s *ProjectsServiceOp) GetAllProjects(ctx context.Context) (*Projects, *Response, error) {
req, err := s.client.NewRequest(ctx, http.MethodGet, projectBasePath, nil)
if err != nil {
return nil, nil, err
}
root := new(Projects)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root, resp, nil
}
//GetOneProject gets a single project.
//See more: https://docs.atlas.mongodb.com/reference/api/project-get-one/
func (s *ProjectsServiceOp) GetOneProject(ctx context.Context, projectID string) (*Project, *Response, error) {
if projectID == "" {
return nil, nil, NewArgError("projectID", "must be set")
}
path := fmt.Sprintf("%s/%s", projectBasePath, projectID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Project)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//GetOneProjectByName gets a single project by its name.
//See more: https://docs.atlas.mongodb.com/reference/api/project-get-one-by-name/
func (s *ProjectsServiceOp) GetOneProjectByName(ctx context.Context, projectName string) (*Project, *Response, error) {
if projectName == "" {
return nil, nil, NewArgError("projectName", "must be set")
}
path := fmt.Sprintf("%s/byName/%s", projectBasePath, projectName)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Project)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Create creates a project.
//See more: https://docs.atlas.mongodb.com/reference/api/project-create-one/
func (s *ProjectsServiceOp) Create(ctx context.Context, createRequest *Project) (*Project, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
req, err := s.client.NewRequest(ctx, http.MethodPost, projectBasePath, createRequest)
if err != nil {
return nil, nil, err
}
root := new(Project)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Delete deletes a project.
// See more: https://docs.atlas.mongodb.com/reference/api/project-delete-one/
func (s *ProjectsServiceOp) Delete(ctx context.Context, projectID string) (*Response, error) {
if projectID == "" {
return nil, NewArgError("projectID", "must be set")
}
basePath := fmt.Sprintf("%s/%s", projectBasePath, projectID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, basePath, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
//GetProjectTeamsAssigned gets all the teams assigned to a project.
//See more: https://docs.atlas.mongodb.com/reference/api/project-get-teams/
func (s *ProjectsServiceOp) GetProjectTeamsAssigned(ctx context.Context, projectID string) (*TeamsAssigned, *Response, error) {
if projectID == "" {
return nil, nil, NewArgError("projectID", "must be set")
}
path := fmt.Sprintf("%s/%s/teams", projectBasePath, projectID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(TeamsAssigned)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//AddTeamsToProject adds teams to a project
//See more: https://docs.atlas.mongodb.com/reference/api/project-add-team/
func (s *ProjectsServiceOp) AddTeamsToProject(ctx context.Context, projectID string, createRequest *ProjectTeam) (*TeamsAssigned, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf("%s/%s/teams", projectBasePath, projectID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(TeamsAssigned)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
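
A sketch of creating a project and then assigning a team to it with this service, using the GROUP_* role constants declared above. The import path, org ID, and team ID are placeholders assumed for illustration.

```golang
package main

import (
	"context"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

func main() {
	// Stand-in client; real use needs an authenticated *http.Client.
	client := mongodbatlas.NewClient(http.DefaultClient)
	ctx := context.Background()

	project, _, err := client.Projects.Create(ctx, &mongodbatlas.Project{
		Name:  "vault-demo",
		OrgID: "<ORG-ID>",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Give an existing team data-access read/write on the new project.
	team := &mongodbatlas.ProjectTeam{
		TeamID: "<TEAM-ID>",
		Roles:  []*mongodbatlas.RoleName{{RoleName: mongodbatlas.GROUP_DATA_ACCESS_READ_WRITE}},
	}
	if _, _, err := client.Projects.AddTeamsToProject(ctx, project.ID, team); err != nil {
		log.Fatal(err)
	}
}
```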

View File

@@ -0,0 +1,351 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const (
teamsBasePath = "orgs/%s/teams"
)
// TeamsService is an interface for interfacing with the Teams
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/teams/
type TeamsService interface {
List(context.Context, string, *ListOptions) ([]Team, *Response, error)
Get(context.Context, string, string) (*Team, *Response, error)
GetOneTeamByName(context.Context, string, string) (*Team, *Response, error)
GetTeamUsersAssigned(context.Context, string, string) ([]AtlasUser, *Response, error)
Create(context.Context, string, *Team) (*Team, *Response, error)
Rename(context.Context, string, string, string) (*Team, *Response, error)
UpdateTeamRoles(context.Context, string, string, *TeamUpdateRoles) ([]TeamRoles, *Response, error)
AddUserToTeam(context.Context, string, string, string) ([]AtlasUser, *Response, error)
RemoveUserToTeam(context.Context, string, string, string) (*Response, error)
RemoveTeamFromOrganization(context.Context, string, string) (*Response, error)
RemoveTeamFromProject(context.Context, string, string) (*Response, error)
}
//TeamsServiceOp handles communication with the Teams related methods of the
//MongoDB Atlas API
type TeamsServiceOp struct {
client *Client
}
var _ TeamsService = &TeamsServiceOp{}
// TeamsResponse represents an array of teams.
type TeamsResponse struct {
Links []*Link `json:"links"`
Results []Team `json:"results"`
TotalCount int `json:"totalCount"`
}
type Team struct {
ID string `json:"id,omitempty"`
Name string `json:"name"`
Usernames []string `json:"usernames,omitempty"`
}
//AtlasUserAssigned represents the Atlas users assigned to a team or project.
type AtlasUserAssigned struct {
Links []*Link `json:"links"`
Results []AtlasUser `json:"results"`
TotalCount int `json:"totalCount"`
}
type TeamUpdateRoles struct {
RoleNames []string `json:"roleNames"`
}
type TeamUpdateRolesResponse struct {
Links []*Link `json:"links"`
Results []TeamRoles `json:"results"`
TotalCount int `json:"totalCount"`
}
type TeamRoles struct {
Links []*Link `json:"links"`
RoleNames []string `json:"roleNames"`
TeamID string `json:"teamId"`
}
//List gets all teams in the organization associated to {ORG-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/project-get-all/
func (s *TeamsServiceOp) List(ctx context.Context, orgID string, listOptions *ListOptions) ([]Team, *Response, error) {
path := fmt.Sprintf(teamsBasePath, orgID)
//Add query params from listOptions
path, err := setListOptions(path, listOptions)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(TeamsResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Get gets a single team in the organization by team ID.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-get-one-by-id/
func (s *TeamsServiceOp) Get(ctx context.Context, orgID string, teamID string) (*Team, *Response, error) {
if teamID == "" {
return nil, nil, NewArgError("teamID", "must be set")
}
basePath := fmt.Sprintf(teamsBasePath, orgID)
path := fmt.Sprintf("%s/%s", basePath, teamID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Team)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//GetOneTeamByName gets a single team by its name.
//See more: https://docs.atlas.mongodb.com/reference/api/project-get-one-by-name/
func (s *TeamsServiceOp) GetOneTeamByName(ctx context.Context, orgID, teamName string) (*Team, *Response, error) {
if teamName == "" {
return nil, nil, NewArgError("teamName", "must be set")
}
basePath := fmt.Sprintf(teamsBasePath, orgID)
path := fmt.Sprintf("%s/byName/%s", basePath, teamName)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Team)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//GetTeamUsersAssigned gets all the users assigned to a team.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-get-all-users/
func (s *TeamsServiceOp) GetTeamUsersAssigned(ctx context.Context, orgID, teamID string) ([]AtlasUser, *Response, error) {
if orgID == "" {
return nil, nil, NewArgError("orgID", "must be set")
}
basePath := fmt.Sprintf(teamsBasePath, orgID)
path := fmt.Sprintf("%s/%s/users", basePath, teamID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(AtlasUserAssigned)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//Create creates a team.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-create-one/
func (s *TeamsServiceOp) Create(ctx context.Context, orgID string, createRequest *Team) (*Team, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
req, err := s.client.NewRequest(ctx, http.MethodPost, fmt.Sprintf(teamsBasePath, orgID), createRequest)
if err != nil {
return nil, nil, err
}
root := new(Team)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//Rename renames a team.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-rename-one/
func (s *TeamsServiceOp) Rename(ctx context.Context, orgID, teamID, teamName string) (*Team, *Response, error) {
if teamName == "" {
return nil, nil, NewArgError("teamName", "cannot be nil")
}
basePath := fmt.Sprintf(teamsBasePath, orgID)
path := fmt.Sprintf("%s/%s", basePath, teamID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, map[string]interface{}{
"name": teamName,
})
if err != nil {
return nil, nil, err
}
root := new(Team)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
//UpdateTeamRoles updates the roles of a team in an Atlas project.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-update-roles/
func (s *TeamsServiceOp) UpdateTeamRoles(ctx context.Context, orgID string, teamID string, updateTeamRolesRequest *TeamUpdateRoles) ([]TeamRoles, *Response, error) {
if updateTeamRolesRequest == nil {
return nil, nil, NewArgError("updateTeamRolesRequest", "cannot be nil")
}
basePath := fmt.Sprintf(teamsBasePath, orgID)
path := fmt.Sprintf("%s/%s", basePath, teamID)
req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateTeamRolesRequest)
if err != nil {
return nil, nil, err
}
root := new(TeamUpdateRolesResponse)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//AddUserToTeam adds a user from the organization associated with {ORG-ID} to the team with ID {TEAM-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-add-user/
func (s *TeamsServiceOp) AddUserToTeam(ctx context.Context, orgID, teamID, userID string) ([]AtlasUser, *Response, error) {
if userID == "" {
return nil, nil, NewArgError("userID", "cannot be nil")
}
basePath := fmt.Sprintf(teamsBasePath, orgID)
path := fmt.Sprintf("%s/%s/users", basePath, teamID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, map[string]interface{}{
"id": userID,
})
if err != nil {
return nil, nil, err
}
root := new(AtlasUserAssigned)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Results, resp, nil
}
//RemoveUserToTeam removes the specified user from the specified team.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-remove-user/
func (s *TeamsServiceOp) RemoveUserToTeam(ctx context.Context, orgID, teamID, userID string) (*Response, error) {
if userID == "" {
return nil, NewArgError("userID", "cannot be nil")
}
basePath := fmt.Sprintf(teamsBasePath, orgID)
path := fmt.Sprintf("%s/%s/users/%s", basePath, teamID, userID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
//RemoveTeamFromOrganization deletes the team with ID {TEAM-ID} from the organization specified to {ORG-ID}.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-delete-one/
func (s *TeamsServiceOp) RemoveTeamFromOrganization(ctx context.Context, orgID, teamID string) (*Response, error) {
if teamID == "" {
return nil, NewArgError("teamID", "cannot be nil")
}
basePath := fmt.Sprintf(teamsBasePath, orgID)
path := fmt.Sprintf("%s/%s", basePath, teamID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
//RemoveTeamFromProject removes the specified team from the specified project.
//See more: https://docs.atlas.mongodb.com/reference/api/teams-remove-from-project/
func (s *TeamsServiceOp) RemoveTeamFromProject(ctx context.Context, groupID, teamID string) (*Response, error) {
if teamID == "" {
return nil, NewArgError("teamID", "cannot be nil")
}
path := fmt.Sprintf("groups/%s/teams/%s", groupID, teamID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
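
A sketch of creating and then renaming a team with this service. The import path, org ID, and username are placeholders assumed for illustration.

```golang
package main

import (
	"context"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

func main() {
	// Stand-in client; real use needs an authenticated *http.Client.
	client := mongodbatlas.NewClient(http.DefaultClient)
	ctx := context.Background()

	// Create a team in a placeholder organization, then rename it.
	team, _, err := client.Teams.Create(ctx, "<ORG-ID>", &mongodbatlas.Team{
		Name:      "platform",
		Usernames: []string{"jane.doe@example.com"},
	})
	if err != nil {
		log.Fatal(err)
	}

	renamed, _, err := client.Teams.Rename(ctx, "<ORG-ID>", team.ID, "platform-eng")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("team renamed to", renamed.Name)
}
```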

View File

@@ -0,0 +1,163 @@
package mongodbatlas
import (
"context"
"fmt"
"net/http"
)
const whitelistAPIKeysPath = "orgs/%s/apiKeys/%s/whitelist"
// WhitelistAPIKeysService is an interface for interfacing with the Whitelist API Keys
// endpoints of the MongoDB Atlas API.
// See more: https://docs.atlas.mongodb.com/reference/api/apiKeys/#organization-api-key-endpoints
type WhitelistAPIKeysService interface {
List(context.Context, string, string) (*WhitelistAPIKeys, *Response, error)
Get(context.Context, string, string, string) (*WhitelistAPIKey, *Response, error)
Create(context.Context, string, string, []*WhitelistAPIKeysReq) (*WhitelistAPIKeys, *Response, error)
Delete(context.Context, string, string, string) (*Response, error)
}
// WhitelistAPIKeysServiceOp handles communication with the Whitelist API keys related methods of the
// MongoDB Atlas API
type WhitelistAPIKeysServiceOp struct {
client *Client
}
var _ WhitelistAPIKeysService = &WhitelistAPIKeysServiceOp{}
// WhitelistAPIKey represents a Whitelist API key.
type WhitelistAPIKey struct {
CidrBlock string `json:"cidrBlock,omitempty"` // CIDR-notated range of whitelisted IP addresses.
Count int `json:"count,omitempty"` // Total number of requests that have originated from this IP address.
Created string `json:"created,omitempty"` // Date this IP address was added to the whitelist.
IPAddress string `json:"ipAddress,omitempty"` // Whitelisted IP address.
LastUsed string `json:"lastUsed,omitempty"` // Date of the most recent request that originated from this IP address. This field only appears if at least one request has originated from this IP address, and is only updated when a whitelisted resource is accessed.
LastUsedAddress string `json:"lastUsedAddress,omitempty"` // IP address from which the last call to the API was issued. This field only appears if at least one request has originated from this IP address.
Links []*Link `json:"links,omitempty"` // An array of documents, representing a link to one or more sub-resources and/or related resources such as list pagination. See Linking for more information.}
}
// WhitelistAPIKeys represents all Whitelist API keys.
type WhitelistAPIKeys struct {
Results []*WhitelistAPIKey `json:"results,omitempty"` // Includes one WhitelistAPIKey object for each item detailed in the results array section.
Links []*Link `json:"links,omitempty"` // One or more links to sub-resources and/or related resources.
TotalCount int `json:"totalCount,omitempty"` // Count of the total number of items in the result set. It may be greater than the number of objects in the results array if the entire result set is paginated.
}
// WhitelistAPIKeysReq represents the request body for the Create method.
type WhitelistAPIKeysReq struct {
IPAddress string `json:"ipAddress,omitempty"` // IP address to be added to the whitelist for the API key.
CidrBlock string `json:"cidrBlock,omitempty"` // Whitelist entry in CIDR notation to be added for the API key.
}
// List gets all Whitelist API keys.
// See more: https://docs.atlas.mongodb.com/reference/api/apiKeys-org-whitelist-get-all/
func (s *WhitelistAPIKeysServiceOp) List(ctx context.Context, orgID string, apiKeyID string) (*WhitelistAPIKeys, *Response, error) {
if orgID == "" {
return nil, nil, NewArgError("orgID", "must be set")
}
if apiKeyID == "" {
return nil, nil, NewArgError("apiKeyID", "must be set")
}
path := fmt.Sprintf(whitelistAPIKeysPath, orgID, apiKeyID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(WhitelistAPIKeys)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root, resp, nil
}
//Get gets one whitelist entry for the specified API key.
//See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-get-one/
func (s *WhitelistAPIKeysServiceOp) Get(ctx context.Context, orgID string, apiKeyID string, ipAddress string) (*WhitelistAPIKey, *Response, error) {
if orgID == "" {
return nil, nil, NewArgError("orgID", "must be set")
}
if apiKeyID == "" {
return nil, nil, NewArgError("apiKeyID", "must be set")
}
if ipAddress == "" {
return nil, nil, NewArgError("ipAddress", "must be set")
}
path := fmt.Sprintf(whitelistAPIKeysPath+"/%s", orgID, apiKeyID, ipAddress)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(WhitelistAPIKey)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Create submits a POST request containing ipAddress or cidrBlock values that are not already present in the whitelist; Atlas adds those entries to the existing whitelist entries.
// See more: https://docs.atlas.mongodb.com/reference/api/apiKeys-org-whitelist-create/
func (s *WhitelistAPIKeysServiceOp) Create(ctx context.Context, orgID string, apiKeyID string, createRequest []*WhitelistAPIKeysReq) (*WhitelistAPIKeys, *Response, error) {
if orgID == "" {
return nil, nil, NewArgError("orgID", "must be set")
}
if apiKeyID == "" {
return nil, nil, NewArgError("apiKeyID", "must be set")
}
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf(whitelistAPIKeysPath, orgID, apiKeyID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(WhitelistAPIKeys)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}
// Delete removes one whitelist entry from the specified API key.
// See more: https://docs.atlas.mongodb.com/reference/api/cloud-provider-snapshot-delete-one/
func (s *WhitelistAPIKeysServiceOp) Delete(ctx context.Context, orgID string, apiKeyID string, ipAddress string) (*Response, error) {
if orgID == "" {
return nil, NewArgError("groupId", "must be set")
}
if apiKeyID == "" {
return nil, NewArgError("clusterName", "must be set")
}
if ipAddress == "" {
return nil, NewArgError("snapshotId", "must be set")
}
path := fmt.Sprintf(whitelistAPIKeysPath+"/%s", orgID, apiKeyID, ipAddress)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
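
A sketch of adding whitelist entries to an organization API key with this service. The import path, org and key IDs, and the documentation-range addresses are placeholders assumed for illustration.

```golang
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	// Import path assumed for illustration; not part of this diff.
	"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)

func main() {
	// Stand-in client; real use needs an authenticated *http.Client.
	client := mongodbatlas.NewClient(http.DefaultClient)

	// Add one IP address and one CIDR block to an existing org API key.
	reqs := []*mongodbatlas.WhitelistAPIKeysReq{
		{IPAddress: "203.0.113.10"},
		{CidrBlock: "198.51.100.0/24"},
	}
	keys, _, err := client.WhitelistAPIKeys.Create(context.Background(), "<ORG-ID>", "<API-KEY-ID>", reqs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("whitelist entries:", keys.TotalCount)
}
```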

12
vendor/github.com/mwielbut/pointy/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,12 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

21
vendor/github.com/mwielbut/pointy/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018 Mateusz Wielbut
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

107
vendor/github.com/mwielbut/pointy/README.md generated vendored Normal file
View File

@@ -0,0 +1,107 @@
# pointy
Simple helper functions to provide a shorthand to get a pointer to a variable holding a constant...because it's annoying when you have to do it hundreds of times in unit tests:
```golang
val := 42
pointerToVal := &val
// vs.
pointerToVal := pointy.Int(42)
```
**New in release 1.1.0**
Additional helper functions have been added to safely dereference pointers
or return a fallback value:
```golang
val := 42
pointerToVal := &val
// then later in your code..
myVal := pointy.IntValue(pointerToVal, 99) // returns 42 (or 99 if pointerToVal was nil)
```
## GoDoc
https://godoc.org/github.com/mwielbut/pointy
## Installation
`go get github.com/mwielbut/pointy`
## Example
```golang
package main
import (
"fmt"
"github.com/mwielbut/pointy"
)
func main() {
foo := pointy.Int64(2018)
fmt.Println("foo is a pointer to:", *foo)
bar := pointy.String("point to me")
fmt.Println("bar is a pointer to:", *bar)
// get the value back out (new in v1.1.0)
barVal := pointy.StringValue(bar, "empty!")
fmt.Println("bar's value is:", barVal)
}
```
## Available Functions
`Bool(x bool) *bool`
`BoolValue(p *bool, fallback bool) bool`
`Byte(x byte) *byte`
`ByteValue(p *byte, fallback byte) byte `
`Complex128(x complex128) *complex128`
`Complex128Value(p *complex128, fallback complex128) complex128`
`Complex64(x complex64) *complex64`
`Complex64Value(p *complex64, fallback complex64) complex64`
`Float32(x float32) *float32`
`Float32Value(p *float32, fallback float32) float32`
`Float64(x float64) *float64`
`Float64Value(p *float64, fallback float64) float64`
`Int(x int) *int`
`IntValue(p *int, fallback int) int`
`Int8(x int8) *int8`
`Int8Value(p *int8, fallback int8) int8`
`Int16(x int16) *int16`
`Int16Value(p *int16, fallback int16) int16`
`Int32(x int32) *int32`
`Int32Value(p *int32, fallback int32) int32`
`Int64(x int64) *int64`
`Int64Value(p *int64, fallback int64) int64`
`Uint(x uint) *uint`
`UintValue(p *uint, fallback uint) uint`
`Uint8(x uint8) *uint8`
`Uint8Value(p *uint8, fallback uint8) uint8`
`Uint16(x uint16) *uint16`
`Uint16Value(p *uint16, fallback uint16) uint16`
`Uint32(x uint32) *uint32`
`Uint32Value(p *uint32, fallback uint32) uint32`
`Uint64(x uint64) *uint64`
`Uint64Value(p *uint64, fallback uint64) uint64`
`String(x string) *string`
`StringValue(p *string, fallback string) string`
`Rune(x rune) *rune`
`RuneValue(p *rune, fallback rune) rune`
## Motivation
Creating pointers to literal constant values is useful, especially in unit tests. Go doesn't support simply using the address operator (&) to reference the location of e.g. `value := &int64(42)` so we're forced to [create](https://stackoverflow.com/questions/35146286/find-address-of-constant-in-go/35146856#35146856) [little](https://stackoverflow.com/questions/34197248/how-can-i-store-reference-to-the-result-of-an-operation-in-go/34197367#34197367) [workarounds](https://stackoverflow.com/questions/30716354/how-do-i-do-a-literal-int64-in-go/30716481#30716481). A common solution is to create a helper function:
```golang
func createInt64Pointer(x int64) *int64 {
return &x
}
// now you can create a pointer to 42 inline
value := createInt64Pointer(42)
```
This package provides a library of these simple little helper functions for every native Go primitive.
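For comparison, a minimal sketch of the same call sites once the per-type helper is replaced by this package (the variable names here are purely illustrative):
```golang
package main

import (
	"fmt"

	"github.com/mwielbut/pointy"
)

func main() {
	// No createInt64Pointer-style helper needed; each primitive has a constructor.
	timeoutMillis := pointy.Int64(42)   // *int64 pointing at 42
	retries := pointy.Int(3)            // *int pointing at 3
	label := pointy.String("unit-test") // *string pointing at "unit-test"
	fmt.Println(*timeoutMillis, *retries, *label)
}
```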

7
vendor/github.com/mwielbut/pointy/go.mod generated vendored Normal file
View File

@@ -0,0 +1,7 @@
module github.com/mwielbut/pointy
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
)

6
vendor/github.com/mwielbut/pointy/go.sum generated vendored Normal file
View File

@@ -0,0 +1,6 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

237
vendor/github.com/mwielbut/pointy/pointy.go generated vendored Normal file
View File

@@ -0,0 +1,237 @@
// Package pointy is a set of simple helper functions to provide a shorthand to
// get a pointer to a variable holding a constant.
package pointy
// Bool returns a pointer to a variable holding the supplied bool constant
func Bool(x bool) *bool {
return &x
}
// BoolValue returns the bool value pointed to by p or fallback if p is nil
func BoolValue(p *bool, fallback bool) bool {
if p == nil {
return fallback
}
return *p
}
// Byte returns a pointer to a variable holding the supplied byte constant
func Byte(x byte) *byte {
return &x
}
// ByteValue returns the byte value pointed to by p or fallback if p is nil
func ByteValue(p *byte, fallback byte) byte {
if p == nil {
return fallback
}
return *p
}
// Complex128 returns a pointer to a variable holding the supplied complex128 constant
func Complex128(x complex128) *complex128 {
return &x
}
// Complex128Value returns the complex128 value pointed to by p or fallback if p is nil
func Complex128Value(p *complex128, fallback complex128) complex128 {
if p == nil {
return fallback
}
return *p
}
// Complex64 returns a pointer to a variable holding the supplied complex64 constant
func Complex64(x complex64) *complex64 {
return &x
}
// Complex64Value returns the complex64 value pointed to by p or fallback if p is nil
func Complex64Value(p *complex64, fallback complex64) complex64 {
if p == nil {
return fallback
}
return *p
}
// Float32 returns a pointer to a variable holding the supplied float32 constant
func Float32(x float32) *float32 {
return &x
}
// Float32Value returns the float32 value pointed to by p or fallback if p is nil
func Float32Value(p *float32, fallback float32) float32 {
if p == nil {
return fallback
}
return *p
}
// Float64 returns a pointer to a variable holding the supplied float64 constant
func Float64(x float64) *float64 {
return &x
}
// Float64Value returns the float64 value pointed to by p or fallback if p is nil
func Float64Value(p *float64, fallback float64) float64 {
if p == nil {
return fallback
}
return *p
}
// Int returns a pointer to a variable holding the supplied int constant
func Int(x int) *int {
return &x
}
// IntValue returns the int value pointed to by p or fallback if p is nil
func IntValue(p *int, fallback int) int {
if p == nil {
return fallback
}
return *p
}
// Int8 returns a pointer to a variable holding the supplied int8 constant
func Int8(x int8) *int8 {
return &x
}
// Int8Value returns the int8 value pointed to by p or fallback if p is nil
func Int8Value(p *int8, fallback int8) int8 {
if p == nil {
return fallback
}
return *p
}
// Int16 returns a pointer to a variable holding the supplied int16 constant
func Int16(x int16) *int16 {
return &x
}
// Int16Value returns the int16 value pointed to by p or fallback if p is nil
func Int16Value(p *int16, fallback int16) int16 {
if p == nil {
return fallback
}
return *p
}
// Int32 returns a pointer to a variable holding the supplied int32 constant
func Int32(x int32) *int32 {
return &x
}
// Int32Value returns the int32 value pointed to by p or fallback if p is nil
func Int32Value(p *int32, fallback int32) int32 {
if p == nil {
return fallback
}
return *p
}
// Int64 returns a pointer to a variable holding the supplied int64 constant
func Int64(x int64) *int64 {
return &x
}
// Int64Value returns the int64 value pointed to by p or fallback if p is nil
func Int64Value(p *int64, fallback int64) int64 {
if p == nil {
return fallback
}
return *p
}
// Uint returns a pointer to a variable holding the supplied uint constant
func Uint(x uint) *uint {
return &x
}
// UintValue returns the uint value pointed to by p or fallback if p is nil
func UintValue(p *uint, fallback uint) uint {
if p == nil {
return fallback
}
return *p
}
// Uint8 returns a pointer to a variable holding the supplied uint8 constant
func Uint8(x uint8) *uint8 {
return &x
}
// Uint8Value returns the uint8 value pointed to by p or fallback if p is nil
func Uint8Value(p *uint8, fallback uint8) uint8 {
if p == nil {
return fallback
}
return *p
}
// Uint16 returns a pointer to a variable holding the supplied uint16 constant
func Uint16(x uint16) *uint16 {
return &x
}
// Uint16Value returns the uint16 value pointed to by p or fallback if p is nil
func Uint16Value(p *uint16, fallback uint16) uint16 {
if p == nil {
return fallback
}
return *p
}
// Uint32 returns a pointer to a variable holding the supplied uint32 constant
func Uint32(x uint32) *uint32 {
return &x
}
// Uint32Value returns the uint32 value pointed to by p or fallback if p is nil
func Uint32Value(p *uint32, fallback uint32) uint32 {
if p == nil {
return fallback
}
return *p
}
// Uint64 returns a pointer to a variable holding the supplied uint64 constant
func Uint64(x uint64) *uint64 {
return &x
}
// Uint64Value returns the uint64 value pointed to by p or fallback if p is nil
func Uint64Value(p *uint64, fallback uint64) uint64 {
if p == nil {
return fallback
}
return *p
}
// String returns a pointer to a variable holding the supplied string constant
func String(x string) *string {
return &x
}
// StringValue returns the string value pointed to by p or fallback if p is nil
func StringValue(p *string, fallback string) string {
if p == nil {
return fallback
}
return *p
}
// Rune returns a pointer to a variable holding the supplied rune constant
func Rune(x rune) *rune {
return &x
}
// RuneValue returns the rune value pointed to by p or fallback if p is nil
func RuneValue(p *rune, fallback rune) rune {
if p == nil {
return fallback
}
return *p
}

View File

@@ -1,97 +0,0 @@
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/ory/dockertest/docker/pkg/archive"
"github.com/sirupsen/logrus"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
var err error
oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}

View File

@@ -1,88 +0,0 @@
// +build ignore
// Hand writing: _Ctype_struct___0
/*
Input to cgo -godefs.
*/
package disk
/*
#include <sys/types.h>
#include <sys/mount.h>
#include <devstat.h>
enum {
sizeofPtr = sizeof(void*),
};
// because statinfo has long double snap_time, redefine with changing long long
struct statinfo2 {
long cp_time[CPUSTATES];
long tk_nin;
long tk_nout;
struct devinfo *dinfo;
long long snap_time;
};
*/
import "C"
// Machine characteristics; for internal use.
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
sizeofLongDouble = C.sizeof_longlong
DEVSTAT_NO_DATA = 0x00
DEVSTAT_READ = 0x01
DEVSTAT_WRITE = 0x02
DEVSTAT_FREE = 0x03
// from sys/mount.h
MNT_RDONLY = 0x00000001 /* read only filesystem */
MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */
MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */
MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */
MNT_UNION = 0x00000020 /* union with underlying filesystem */
MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */
MNT_SUIDDIR = 0x00100000 /* special handling of SUID on dirs */
MNT_SOFTDEP = 0x00200000 /* soft updates being done */
MNT_NOSYMFOLLOW = 0x00400000 /* do not follow symlinks */
MNT_GJOURNAL = 0x02000000 /* GEOM journal support enabled */
MNT_MULTILABEL = 0x04000000 /* MAC support for individual objects */
MNT_ACLS = 0x08000000 /* ACL support enabled */
MNT_NOATIME = 0x10000000 /* disable update of file access time */
MNT_NOCLUSTERR = 0x40000000 /* disable cluster read */
MNT_NOCLUSTERW = 0x80000000 /* disable cluster write */
MNT_NFS4ACLS = 0x00000010
MNT_WAIT = 1 /* synchronously wait for I/O to complete */
MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */
MNT_LAZY = 3 /* push data not written by filesystem syncer */
MNT_SUSPEND = 4 /* Suspend file system after sync */
)
const (
sizeOfDevstat = C.sizeof_struct_devstat
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
_C_long_double C.longlong
)
type Statfs C.struct_statfs
type Fsid C.struct_fsid
type Devstat C.struct_devstat
type Bintime C.struct_bintime

View File

@@ -1,70 +0,0 @@
// +build ignore
// Hand writing: _Ctype_struct___0
/*
Input to cgo -godefs.
*/
package disk
/*
#include <sys/types.h>
#include <sys/disk.h>
#include <sys/mount.h>
enum {
sizeofPtr = sizeof(void*),
};
*/
import "C"
// Machine characteristics; for internal use.
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
sizeofLongDouble = C.sizeof_longlong
DEVSTAT_NO_DATA = 0x00
DEVSTAT_READ = 0x01
DEVSTAT_WRITE = 0x02
DEVSTAT_FREE = 0x03
// from sys/mount.h
MNT_RDONLY = 0x00000001 /* read only filesystem */
MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */
MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */
MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */
MNT_NODEV = 0x00000010 /* don't interpret special files */
MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */
MNT_WAIT = 1 /* synchronously wait for I/O to complete */
MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */
MNT_LAZY = 3 /* push data not written by filesystem syncer */
)
const (
sizeOfDiskstats = C.sizeof_struct_diskstats
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
_C_long_double C.longlong
)
type Statfs C.struct_statfs
type Diskstats C.struct_diskstats
type Fsid C.fsid_t
type Timeval C.struct_timeval
type Diskstat C.struct_diskstat
type Bintime C.struct_bintime

View File

@@ -1,17 +0,0 @@
// +build ignore
// plus hand editing about timeval
/*
Input to cgo -godefs.
*/
package host
/*
#include <sys/time.h>
#include <utmpx.h>
*/
import "C"
type Utmpx C.struct_utmpx
type Timeval C.struct_timeval

View File

@@ -1,44 +0,0 @@
// +build ignore
/*
Input to cgo -godefs.
*/
package host
/*
#define KERNEL
#include <sys/types.h>
#include <sys/time.h>
#include <utmpx.h>
#include "freebsd_headers/utxdb.h"
enum {
sizeofPtr = sizeof(void*),
};
*/
import "C"
// Machine characteristics; for internal use.
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
sizeOfUtmpx = C.sizeof_struct_futx
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
type Utmp C.struct_utmp // for FreeBSD 9.0 compatibility
type Utmpx C.struct_futx

View File

@@ -1,42 +0,0 @@
// +build ignore
/*
Input to cgo -godefs.
*/
package host
/*
#include <sys/types.h>
#include <utmp.h>
enum {
sizeofPtr = sizeof(void*),
};
*/
import "C"
// Machine characteristics; for internal use.
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
sizeOfUtmp = C.sizeof_struct_utmp
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
type utmp C.struct_utmp
type exit_status C.struct_exit_status
type timeval C.struct_timeval

View File

@@ -1,43 +0,0 @@
// +build ignore
/*
Input to cgo -godefs.
*/
package host
/*
#define KERNEL
#include <sys/types.h>
#include <sys/time.h>
#include <utmp.h>
enum {
sizeofPtr = sizeof(void*),
};
*/
import "C"
// Machine characteristics; for internal use.
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
sizeOfUtmp = C.sizeof_struct_utmp
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
type Utmp C.struct_utmp
type Timeval C.struct_timeval

View File

@@ -1,34 +0,0 @@
// +build ignore
/*
Input to cgo -godefs.
*/
package mem
/*
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <uvm/uvmexp.h>
*/
import "C"
// Machine characteristics; for internal use.
const (
CTLVm = 2
CTLVfs = 10
VmUvmexp = 4 // get uvmexp
VfsGeneric = 0
VfsBcacheStat = 3
)
const (
sizeOfUvmexp = C.sizeof_struct_uvmexp
sizeOfBcachestats = C.sizeof_struct_bcachestats
)
type Uvmexp C.struct_uvmexp
type Bcachestats C.struct_bcachestats

View File

@@ -1,160 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hand Writing
// - all pointer in ExternProc to uint64
// +build ignore
/*
Input to cgo -godefs.
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
// +godefs map struct_ [16]byte /* in6_addr */
package process
/*
#define __DARWIN_UNIX03 0
#define KERNEL
#define _DARWIN_USE_64_BIT_INODE
#include <dirent.h>
#include <fcntl.h>
#include <signal.h>
#include <termios.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/message.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <net/bpf.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/_types/_timeval.h>
#include <sys/appleapiopts.h>
#include <sys/cdefs.h>
#include <sys/param.h>
#include <bsm/audit.h>
#include <sys/queue.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
struct ucred_queue {
struct ucred *tqe_next;
struct ucred **tqe_prev;
TRACEBUF
};
*/
import "C"
// Machine characteristics; for internal use.
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type UGid_t C.gid_t
type KinfoProc C.struct_kinfo_proc
type Eproc C.struct_eproc
type Proc C.struct_proc
type Session C.struct_session
type ucred C.struct_ucred
type Uucred C.struct__ucred
type Upcred C.struct__pcred
type Vmspace C.struct_vmspace
type Sigacts C.struct_sigacts
type ExternProc C.struct_extern_proc
type Itimerval C.struct_itimerval
type Vnode C.struct_vnode
type Pgrp C.struct_pgrp
type UserStruct C.struct_user
type Au_session C.struct_au_session
type Posix_cred C.struct_posix_cred
type Label C.struct_label
type AuditinfoAddr C.struct_auditinfo_addr
type AuMask C.struct_au_mask
type AuTidAddr C.struct_au_tid_addr
// TAILQ(ucred)
type UcredQueue C.struct_ucred_queue

View File

@@ -1,95 +0,0 @@
// +build ignore
// We still need editing by hand.
// go tool cgo -godefs types_freebsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_freebsd_amd64.go
/*
Input to cgo -godefs.
*/
// +godefs map struct_pargs int64 /* pargs */
// +godefs map struct_proc int64 /* proc */
// +godefs map struct_user int64 /* user */
// +godefs map struct_vnode int64 /* vnode */
// +godefs map struct_vnode int64 /* vnode */
// +godefs map struct_filedesc int64 /* filedesc */
// +godefs map struct_vmspace int64 /* vmspace */
// +godefs map struct_pcb int64 /* pcb */
// +godefs map struct_thread int64 /* thread */
// +godefs map struct___sigset [16]byte /* sigset */
package process
/*
#include <sys/types.h>
#include <sys/user.h>
enum {
sizeofPtr = sizeof(void*),
};
*/
import "C"
// Machine characteristics; for internal use.
const (
CTLKern = 1 // "high kernel": proc, limits
KernProc = 14 // struct: process entries
KernProcPID = 1 // by process id
KernProcProc = 8 // only return procs
KernProcPathname = 12 // path to executable
KernProcArgs = 7 // get/set arguments/proctitle
)
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
)
const (
sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry
sizeOfKinfoProc = C.sizeof_struct_kinfo_proc
)
// from sys/proc.h
const (
SIDL = 1 /* Process being created by fork. */
SRUN = 2 /* Currently runnable. */
SSLEEP = 3 /* Sleeping on an address. */
SSTOP = 4 /* Process debugging or suspension. */
SZOMB = 5 /* Awaiting collection by parent. */
SWAIT = 6 /* Waiting for interrupt. */
SLOCK = 7 /* Blocked on a lock. */
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type KinfoProc C.struct_kinfo_proc
type Priority C.struct_priority
type KinfoVmentry C.struct_kinfo_vmentry

View File

@@ -1,103 +0,0 @@
// +build ignore
// We still need editing by hand.
// go tool cgo -godefs types_openbsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_openbsd_amd64.go
/*
Input to cgo -godefs.
*/
// +godefs map struct_pargs int64 /* pargs */
// +godefs map struct_proc int64 /* proc */
// +godefs map struct_user int64 /* user */
// +godefs map struct_vnode int64 /* vnode */
// +godefs map struct_vnode int64 /* vnode */
// +godefs map struct_filedesc int64 /* filedesc */
// +godefs map struct_vmspace int64 /* vmspace */
// +godefs map struct_pcb int64 /* pcb */
// +godefs map struct_thread int64 /* thread */
// +godefs map struct___sigset [16]byte /* sigset */
package process
/*
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
enum {
sizeofPtr = sizeof(void*),
};
*/
import "C"
// Machine characteristics; for internal use.
const (
CTLKern = 1 // "high kernel": proc, limits
KernProc = 66 // struct: process entries
KernProcAll = 0
KernProcPID = 1 // by process id
KernProcProc = 8 // only return procs
KernProcPathname = 12 // path to executable
KernProcArgs = 55 // get/set arguments/proctitle
KernProcArgv = 1
KernProcEnv = 3
)
const (
ArgMax = 256 * 1024 // sys/syslimits.h:#define ARG_MAX
)
const (
sizeofPtr = C.sizeofPtr
sizeofShort = C.sizeof_short
sizeofInt = C.sizeof_int
sizeofLong = C.sizeof_long
sizeofLongLong = C.sizeof_longlong
)
const (
sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry
sizeOfKinfoProc = C.sizeof_struct_kinfo_proc
)
// from sys/proc.h
const (
SIDL = 1 /* Process being created by fork. */
SRUN = 2 /* Currently runnable. */
SSLEEP = 3 /* Sleeping on an address. */
SSTOP = 4 /* Process debugging or suspension. */
SZOMB = 5 /* Awaiting collection by parent. */
SDEAD = 6 /* Thread is almost gone */
SONPROC = 7 /* Thread is currently on a CPU. */
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type KinfoProc C.struct_kinfo_proc
type Priority C.struct_priority
type KinfoVmentry C.struct_kinfo_vmentry

View File

@@ -1,40 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"io"
"log"
"os"
"github.com/ulikunitz/xz"
)
func main() {
const text = "The quick brown fox jumps over the lazy dog.\n"
var buf bytes.Buffer
// compress text
w, err := xz.NewWriter(&buf)
if err != nil {
log.Fatalf("xz.NewWriter error %s", err)
}
if _, err := io.WriteString(w, text); err != nil {
log.Fatalf("WriteString error %s", err)
}
if err := w.Close(); err != nil {
log.Fatalf("w.Close error %s", err)
}
// decompress buffer and write output to stdout
r, err := xz.NewReader(&buf)
if err != nil {
log.Fatalf("NewReader error %s", err)
}
if _, err = io.Copy(os.Stdout, r); err != nil {
log.Fatalf("io.Copy error %s", err)
}
}

View File

@@ -1,78 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
// This program must be run after mksyscall.go.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
)
func writeASMFile(in string, fileName string, buildTags string) {
trampolines := map[string]bool{}
var out bytes.Buffer
fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "// +build %s\n", buildTags)
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "#include \"textflag.h\"\n")
for _, line := range strings.Split(in, "\n") {
if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
continue
}
fn := line[5 : len(line)-13]
if !trampolines[fn] {
trampolines[fn] = true
fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
}
}
err := ioutil.WriteFile(fileName, out.Bytes(), 0644)
if err != nil {
log.Fatalf("can't write %s: %s", fileName, err)
}
}
func main() {
in1, err := ioutil.ReadFile("syscall_darwin.go")
if err != nil {
log.Fatalf("can't open syscall_darwin.go: %s", err)
}
arch := os.Args[1]
in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
if err != nil {
log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
}
in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
if err != nil {
log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
}
in := string(in1) + string(in2) + string(in3)
writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.s", arch), "go1.12")
in1, err = ioutil.ReadFile("syscall_darwin.1_13.go")
if err != nil {
log.Fatalf("can't open syscall_darwin.1_13.go: %s", err)
}
in2, err = ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.1_13.go", arch))
if err != nil {
log.Fatalf("can't open zsyscall_darwin_%s.1_13.go: %s", arch, err)
}
in = string(in1) + string(in2)
writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.1_13.s", arch), "go1.13")
}

View File

@@ -1,127 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// mkpost processes the output of cgo -godefs to
// modify the generated types. It is used to clean up
// the sys API in an architecture specific manner.
//
// mkpost is run after cgo -godefs; see README.md.
package main
import (
"bytes"
"fmt"
"go/format"
"io/ioutil"
"log"
"os"
"regexp"
)
func main() {
// Get the OS and architecture (using GOARCH_TARGET if it exists)
goos := os.Getenv("GOOS")
goarch := os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check that we are using the Docker-based build system if we should be.
if goos == "linux" {
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n")
os.Stderr.WriteString("See README.md\n")
os.Exit(1)
}
}
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
if goos == "aix" {
// Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
// to avoid having both StTimespec and Timespec.
sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
b = sttimespec.ReplaceAll(b, []byte("Timespec"))
}
// Intentionally export __val fields in Fsid and Sigset_t
valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
// Intentionally export __fds_bits field in FdSet
fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
// If we have empty Ptrace structs, we should delete them. Only s390x emits
// nonempty Ptrace structs.
ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
b = ptraceRexexp.ReplaceAll(b, nil)
// Replace the control_regs union with a blank identifier for now.
controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
// Remove fields that are added by glibc
// Note that this is unstable as the identifiers are private.
removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// Convert [65]int8 to [65]byte in Utsname members to simplify
// conversion to string; see golang.org/issue/20753
convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
// Convert [n]int8 to [n]byte in Statvfs_t members to simplify
// conversion to string.
convertStatvfsRegex := regexp.MustCompile(`((Fstype|Mnton|Mntfrom)name)(\s+)\[(\d+)\]int8`)
b = convertStatvfsRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
// Convert [1024]int8 to [1024]byte in Ptmget members
convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
// Remove spare fields (e.g. in Statx_t)
spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove cgo padding fields
removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove padding, hidden, or unused fields
removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove the first line of warning from cgo
b = b[bytes.IndexByte(b, '\n')+1:]
// Modify the command in the header to include:
// mkpost, our own warning, and a build tag.
replacement := fmt.Sprintf(`$1 | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s,%s`, goarch, goos)
cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
// Rename Stat_t time fields
if goos == "freebsd" && goarch == "386" {
// Hide Stat_t.[AMCB]tim_ext fields
renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
}
renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
// gofmt
b, err = format.Source(b)
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(b)
}

View File

@@ -1,402 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_darwin.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named errno.
A line beginning with //sysnb is like //sys, except that the
goroutine will not be suspended during the execution of the system
call. This must only be used for system calls which can never
block, as otherwise the system call could cause all goroutines to
hang.
*/
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
plan9 = flag.Bool("plan9", false, "plan9")
openbsd = flag.Bool("openbsd", false, "openbsd")
netbsd = flag.Bool("netbsd", false, "netbsd")
dragonfly = flag.Bool("dragonfly", false, "dragonfly")
arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
tags = flag.String("tags", "", "build tags")
filename = flag.String("output", "", "output file name (standard output if omitted)")
)
// cmdLine returns this program's commandline arguments
func cmdLine() string {
return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
// Get the OS and architecture (using GOARCH_TARGET if it exists)
goos := os.Getenv("GOOS")
if goos == "" {
fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
os.Exit(1)
}
goarch := os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check that we are using the Docker-based build system if we should
if goos == "linux" {
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
fmt.Fprintf(os.Stderr, "See README.md\n")
os.Exit(1)
}
}
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
libc := false
if goos == "darwin" && (strings.Contains(buildTags(), ",go1.12") || strings.Contains(buildTags(), ",go1.13")) {
libc = true
}
trampolines := map[string]bool{}
text := ""
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, errno error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
// ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers.
if goos == "darwin" && !libc && funct == "ClockGettime" {
continue
}
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
// Go function header.
outDecl := ""
if len(out) > 0 {
outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
}
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
// Check if err return available
errvar := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
break
}
}
// Prepare arguments to Syscall.
var args []string
n := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
} else if p.Type == "string" && errvar != "" {
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass dummy pointer in that case.
// Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
n++
} else if p.Type == "int64" && (*openbsd || *netbsd) {
args = append(args, "0")
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else if endianness == "little-endian" {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
} else if p.Type == "int64" && *dragonfly {
if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
args = append(args, "0")
}
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else if endianness == "little-endian" {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
} else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
if len(args)%2 == 1 && *arm {
// arm abi specifies 64-bit argument uses
// (even, odd) pair
args = append(args, "0")
}
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
}
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
}
// Determine which form to use; pad args with zeros.
asm := "Syscall"
if nonblock != nil {
if errvar == "" && goos == "linux" {
asm = "RawSyscallNoError"
} else {
asm = "RawSyscall"
}
} else {
if errvar == "" && goos == "linux" {
asm = "SyscallNoError"
}
}
if len(args) <= 3 {
for len(args) < 3 {
args = append(args, "0")
}
} else if len(args) <= 6 {
asm += "6"
for len(args) < 6 {
args = append(args, "0")
}
} else if len(args) <= 9 {
asm += "9"
for len(args) < 9 {
args = append(args, "0")
}
} else {
fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
}
// System call number.
if sysname == "" {
sysname = "SYS_" + funct
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
sysname = strings.ToUpper(sysname)
}
var libcFn string
if libc {
asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
sysname = strings.ToLower(sysname) // lowercase
libcFn = sysname
sysname = "funcPC(libc_" + sysname + "_trampoline)"
}
// Actual call.
arglist := strings.Join(args, ", ")
call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
// Assign return values.
body := ""
ret := []string{"_", "_", "_"}
doErrno := false
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" && !*plan9 {
reg = "e1"
ret[2] = reg
doErrno = true
} else if p.Name == "err" && *plan9 {
ret[0] = "r0"
ret[2] = "e1"
break
} else {
reg = fmt.Sprintf("r%d", i)
ret[i] = reg
}
if p.Type == "bool" {
reg = fmt.Sprintf("%s != 0", reg)
}
if p.Type == "int64" && endianness != "" {
// 64-bit number in r1:r0 or r0:r1.
if i+2 > len(out) {
fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
}
if endianness == "big-endian" {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
} else {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
}
ret[i] = fmt.Sprintf("r%d", i)
ret[i+1] = fmt.Sprintf("r%d", i+1)
}
if reg != "e1" || *plan9 {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
text += fmt.Sprintf("\t%s\n", call)
} else {
if errvar == "" && goos == "linux" {
// raw syscall without error on Linux, see golang.org/issue/22924
text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
} else {
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
}
}
text += body
if *plan9 && ret[2] == "e1" {
text += "\tif int32(r0) == -1 {\n"
text += "\t\terr = e1\n"
text += "\t}\n"
} else if doErrno {
text += "\tif e1 != 0 {\n"
text += "\t\terr = errnoErr(e1)\n"
text += "\t}\n"
}
text += "\treturn\n"
text += "}\n\n"
if libc && !trampolines[libcFn] {
// some system calls share a trampoline, like read and readlen.
trampolines[libcFn] = true
// Declare assembly trampoline.
text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
// Assembly trampoline calls the libc_* function, which this magic
// redirects to use the function from libSystem.
text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn)
text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
text += "\n"
}
}
if err := s.Err(); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
}
const srcTemplate = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
%s
`

View File

@@ -1,415 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_aix.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the go func name needs to be different from its libc name,
* or the function is not in libc, the name can be specified
* at the end, after the "=" sign, like
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
*/
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
aix = flag.Bool("aix", false, "aix")
tags = flag.String("tags", "", "build tags")
)
// cmdLine returns this program's commandline arguments
func cmdLine() string {
return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
pack := ""
text := ""
cExtern := "/*\n#include <stdint.h>\n#include <stddef.h>\n"
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
pack = p[1]
}
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, err error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
inps = strings.Join(in, ", ")
outps = strings.Join(out, ", ")
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
// Check if value return, err return available
errvar := ""
retvar := ""
rettype := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
} else {
retvar = p.Name
rettype = p.Type
}
}
// System call name.
if sysname == "" {
sysname = funct
}
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
cRettype := ""
if rettype == "unsafe.Pointer" {
cRettype = "uintptr_t"
} else if rettype == "uintptr" {
cRettype = "uintptr_t"
} else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
cRettype = "uintptr_t"
} else if rettype == "int" {
cRettype = "int"
} else if rettype == "int32" {
cRettype = "int"
} else if rettype == "int64" {
cRettype = "long long"
} else if rettype == "uint32" {
cRettype = "unsigned int"
} else if rettype == "uint64" {
cRettype = "unsigned long long"
} else {
cRettype = "int"
}
if sysname == "exit" {
cRettype = "void"
}
// Change p.Types to c
var cIn []string
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "string" {
cIn = append(cIn, "uintptr_t")
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t", "size_t")
} else if p.Type == "unsafe.Pointer" {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "uintptr" {
cIn = append(cIn, "uintptr_t")
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "int" {
cIn = append(cIn, "int")
} else if p.Type == "int32" {
cIn = append(cIn, "int")
} else if p.Type == "int64" {
cIn = append(cIn, "long long")
} else if p.Type == "uint32" {
cIn = append(cIn, "unsigned int")
} else if p.Type == "uint64" {
cIn = append(cIn, "unsigned long long")
} else {
cIn = append(cIn, "int")
}
}
if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
cExtern += "#define c_select select\n"
}
// Imports of system calls from libc
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
cIn := strings.Join(cIn, ", ")
cExtern += fmt.Sprintf("(%s);\n", cIn)
}
// So file name.
if *aix {
if modname == "" {
modname = "libc.a/shr_64.o"
} else {
fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
os.Exit(1)
}
}
strconvfunc := "C.CString"
// Go function header.
if outps != "" {
outps = fmt.Sprintf(" (%s)", outps)
}
if text != "" {
text += "\n"
}
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
// Prepare arguments to Syscall.
var args []string
n := 0
argN := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))")
} else if p.Type == "string" && errvar != "" {
text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
n++
} else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass nil in that case.
text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n))
n++
text += fmt.Sprintf("\tvar _p%d int\n", n)
text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name)
args = append(args, fmt.Sprintf("C.size_t(_p%d)", n))
n++
} else if p.Type == "int64" && endianness != "" {
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
}
n++
} else if p.Type == "bool" {
text += fmt.Sprintf("\tvar _p%d uint32\n", n)
text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
args = append(args, fmt.Sprintf("_p%d", n))
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
} else if p.Type == "unsafe.Pointer" {
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
} else if p.Type == "int" {
if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) {
args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name))
} else if argN == 0 && funct == "fcntl" {
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) {
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
}
} else if p.Type == "int32" {
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
} else if p.Type == "int64" {
args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name))
} else if p.Type == "uint32" {
args = append(args, fmt.Sprintf("C.uint(%s)", p.Name))
} else if p.Type == "uint64" {
args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name))
} else if p.Type == "uintptr" {
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
}
argN++
}
// Actual call.
arglist := strings.Join(args, ", ")
call := ""
if sysname == "exit" {
if errvar != "" {
call += "er :="
} else {
call += ""
}
} else if errvar != "" {
call += "r0,er :="
} else if retvar != "" {
call += "r0,_ :="
} else {
call += ""
}
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
} else {
call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
}
// Assign return values.
body := ""
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" {
reg = "e1"
} else {
reg = "r0"
}
if reg != "e1" {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
// verify return
if sysname != "exit" && errvar != "" {
if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil {
body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n"
body += fmt.Sprintf("\t\t%s = er\n", errvar)
body += "\t}\n"
} else {
body += "\tif (r0 ==-1 && er != nil) {\n"
body += fmt.Sprintf("\t\t%s = er\n", errvar)
body += "\t}\n"
}
} else if errvar != "" {
body += "\tif (er != nil) {\n"
body += fmt.Sprintf("\t\t%s = er\n", errvar)
body += "\t}\n"
}
text += fmt.Sprintf("\t%s\n", call)
text += body
text += "\treturn\n"
text += "}\n"
}
if err := s.Err(); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
imp := ""
if pack != "unix" {
imp = "import \"golang.org/x/sys/unix\"\n"
}
fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text)
}
const srcTemplate = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package %s
%s
*/
import "C"
import (
"unsafe"
)
%s
%s
`

View File

@@ -1,614 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_aix.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the go func name needs to be different from its libc name,
* or the function is not in libc, the name can be specified
* at the end, after the "=" sign, like
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
This program will generate three files and handle both gc and gccgo implementation:
- zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation)
- zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6
- zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type.
The generated code looks like this
zsyscall_aix_ppc64.go
func asyscall(...) (n int, err error) {
// Pointer Creation
r1, e1 := callasyscall(...)
// Type Conversion
// Error Handler
return
}
zsyscall_aix_ppc64_gc.go
//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
//go:linkname libc_asyscall libc_asyscall
var asyscall syscallFunc
func callasyscall(...) (r1 uintptr, e1 Errno) {
r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
return
}
zsyscall_aix_ppc64_ggcgo.go
// int asyscall(...)
import "C"
func callasyscall(...) (r1 uintptr, e1 Errno) {
r1 = uintptr(C.asyscall(...))
e1 = syscall.GetErrno()
return
}
*/
package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
aix = flag.Bool("aix", false, "aix")
tags = flag.String("tags", "", "build tags")
)
// cmdLine returns this program's commandline arguments
func cmdLine() string {
return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
pack := ""
// GCCGO
textgccgo := ""
cExtern := "/*\n#include <stdint.h>\n"
// GC
textgc := ""
dynimports := ""
linknames := ""
var vars []string
// COMMON
textcommon := ""
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
pack = p[1]
}
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, err error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
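// Submatches: f[2]=Go func name, f[3]=input params, f[4]=output params, f[5]=module name, f[6]=libc name.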
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
inps = strings.Join(in, ", ")
outps = strings.Join(out, ", ")
if sysname == "" {
sysname = funct
}
onlyCommon := false
if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
// This function calls another syscall which is already implemented.
// Therefore, the gc and gccgo parts must not be generated.
onlyCommon = true
}
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
if !onlyCommon {
textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
}
// Check if value return, err return available
errvar := ""
rettype := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
} else {
rettype = p.Type
}
}
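// Rewrite a CamelCase sysname with underscores before lowercasing it into the libc symbol name.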
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
// GCCGO Prototype return type
cRettype := ""
if rettype == "unsafe.Pointer" {
cRettype = "uintptr_t"
} else if rettype == "uintptr" {
cRettype = "uintptr_t"
} else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
cRettype = "uintptr_t"
} else if rettype == "int" {
cRettype = "int"
} else if rettype == "int32" {
cRettype = "int"
} else if rettype == "int64" {
cRettype = "long long"
} else if rettype == "uint32" {
cRettype = "unsigned int"
} else if rettype == "uint64" {
cRettype = "unsigned long long"
} else {
cRettype = "int"
}
if sysname == "exit" {
cRettype = "void"
}
// GCCGO Prototype arguments type
var cIn []string
for i, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "string" {
cIn = append(cIn, "uintptr_t")
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t", "size_t")
} else if p.Type == "unsafe.Pointer" {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "uintptr" {
cIn = append(cIn, "uintptr_t")
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
cIn = append(cIn, "uintptr_t")
} else if p.Type == "int" {
if (i == 0 || i == 2) && funct == "fcntl" {
// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
cIn = append(cIn, "uintptr_t")
} else {
cIn = append(cIn, "int")
}
} else if p.Type == "int32" {
cIn = append(cIn, "int")
} else if p.Type == "int64" {
cIn = append(cIn, "long long")
} else if p.Type == "uint32" {
cIn = append(cIn, "unsigned int")
} else if p.Type == "uint64" {
cIn = append(cIn, "unsigned long long")
} else {
cIn = append(cIn, "int")
}
}
if !onlyCommon {
// GCCGO Prototype Generation
// Imports of system calls from libc
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
cExtern += "#define c_select select\n"
}
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
cIn := strings.Join(cIn, ", ")
cExtern += fmt.Sprintf("(%s);\n", cIn)
}
// GC Library name
if modname == "" {
modname = "libc.a/shr_64.o"
} else {
fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
os.Exit(1)
}
sysvarname := fmt.Sprintf("libc_%s", sysname)
if !onlyCommon {
// GC Runtime import of function to allow cross-platform builds.
dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname)
// GC Link symbol to proc address variable.
linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname)
// GC Library proc address variable.
vars = append(vars, sysvarname)
}
strconvfunc := "BytePtrFromString"
strconvtype := "*byte"
// Go function header.
if outps != "" {
outps = fmt.Sprintf(" (%s)", outps)
}
if textcommon != "" {
textcommon += "\n"
}
textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
// Prepare arguments to call.
var argscommon []string // Arguments in the common part
var argscall []string // Arguments for call prototype
var argsgc []string // Arguments for gc call (with syscall6)
var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall)
n := 0
argN := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name))
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
argsgc = append(argsgc, p.Name)
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else if p.Type == "string" && errvar != "" {
textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n))
argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n))
argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
n++
} else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass nil in that case.
textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name))
argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n))
argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n))
n++
} else if p.Type == "int64" && endianness != "" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n")
} else if p.Type == "bool" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n")
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" {
argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
argsgc = append(argsgc, p.Name)
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else if p.Type == "int" {
if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) {
// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
argsgc = append(argsgc, p.Name)
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
}
} else if p.Type == "int32" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s int32", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
} else if p.Type == "int64" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s int64", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name))
} else if p.Type == "uint32" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name))
} else if p.Type == "uint64" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name))
} else if p.Type == "uintptr" {
argscommon = append(argscommon, p.Name)
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
argsgc = append(argsgc, p.Name)
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
} else {
argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name))
argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
}
argN++
}
nargs := len(argsgc)
// COMMON function generation
argscommonlist := strings.Join(argscommon, ", ")
callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist)
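// ret[0] names the value result and ret[1] the errno returned by the call wrapper; "_" marks an unused slot.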
ret := []string{"_", "_"}
body := ""
doErrno := false
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" {
reg = "e1"
ret[1] = reg
doErrno = true
} else {
reg = "r0"
ret[0] = reg
}
if p.Type == "bool" {
reg = fmt.Sprintf("%s != 0", reg)
}
if reg != "e1" {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
if ret[0] == "_" && ret[1] == "_" {
textcommon += fmt.Sprintf("\t%s\n", callcommon)
} else {
textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon)
}
textcommon += body
if doErrno {
textcommon += "\tif e1 != 0 {\n"
textcommon += "\t\terr = errnoErr(e1)\n"
textcommon += "\t}\n"
}
textcommon += "\treturn\n"
textcommon += "}\n"
if onlyCommon {
continue
}
// CALL Prototype
callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", "))
// GC function generation
asm := "syscall6"
if nonblock != nil {
asm = "rawSyscall6"
}
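// syscall6/rawSyscall6 always take six arguments; pad the argument list with zeros.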
if len(argsgc) <= 6 {
for len(argsgc) < 6 {
argsgc = append(argsgc, "0")
}
} else {
fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct)
os.Exit(1)
}
argsgclist := strings.Join(argsgc, ", ")
callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist)
textgc += callProto
textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc)
textgc += "\treturn\n}\n"
// GCCGO function generation
argsgccgolist := strings.Join(argsgccgo, ", ")
var callgccgo string
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
} else {
callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
}
textgccgo += callProto
textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
textgccgo += "\te1 = syscall.GetErrno()\n"
textgccgo += "\treturn\n}\n"
}
if err := s.Err(); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
imp := ""
if pack != "unix" {
imp = "import \"golang.org/x/sys/unix\"\n"
}
// Print zsyscall_aix_ppc64.go
err := ioutil.WriteFile("zsyscall_aix_ppc64.go",
[]byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)),
0644)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
// Print zsyscall_aix_ppc64_gc.go
vardecls := "\t" + strings.Join(vars, ",\n\t")
vardecls += " syscallFunc"
err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go",
[]byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)),
0644)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
// Print zsyscall_aix_ppc64_gccgo.go
err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go",
[]byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)),
0644)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
}
const srcTemplate1 = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package %s
import (
"unsafe"
)
%s
%s
`
const srcTemplate2 = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
// +build !gccgo
package %s
import (
"unsafe"
)
%s
%s
%s
type syscallFunc uintptr
var (
%s
)
// Implemented in runtime/syscall_aix.go.
func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
%s
`
const srcTemplate3 = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
// +build gccgo
package %s
%s
*/
import "C"
import (
"syscall"
)
%s
%s
`

View File

@@ -1,335 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program reads a file containing function prototypes
(like syscall_solaris.go) and generates system call bodies.
The prototypes are marked by lines beginning with "//sys"
and read like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument.
This includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If go func name needs to be different than its libc name,
* or the function is not in libc, name could be specified
* at the end, after "=" sign, like
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
*/
package main
import (
"bufio"
"flag"
"fmt"
"os"
"regexp"
"strings"
)
var (
b32 = flag.Bool("b32", false, "32bit big-endian")
l32 = flag.Bool("l32", false, "32bit little-endian")
tags = flag.String("tags", "", "build tags")
)
// cmdLine returns this program's command-line arguments
func cmdLine() string {
return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return *tags
}
// Param is function parameter
type Param struct {
Name string
Type string
}
// usage prints the program usage
func usage() {
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n")
os.Exit(1)
}
// parseParamList parses parameter list and returns a slice of parameters
func parseParamList(list string) []string {
list = strings.TrimSpace(list)
if list == "" {
return []string{}
}
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
}
// parseParam splits a parameter into name and type
func parseParam(p string) Param {
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
if ps == nil {
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
os.Exit(1)
}
return Param{ps[1], ps[2]}
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
endianness := ""
if *b32 {
endianness = "big-endian"
} else if *l32 {
endianness = "little-endian"
}
pack := ""
text := ""
dynimports := ""
linknames := ""
var vars []string
for _, path := range flag.Args() {
file, err := os.Open(path)
if err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
t := s.Text()
t = strings.TrimSpace(t)
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
pack = p[1]
}
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
continue
}
// Line must be of the form
// func Open(path string, mode int, perm int) (fd int, err error)
// Split into name, in params, out params.
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
if f == nil {
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
os.Exit(1)
}
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
// Split argument lists on comma.
in := parseParamList(inps)
out := parseParamList(outps)
inps = strings.Join(in, ", ")
outps = strings.Join(out, ", ")
// Try in vain to keep people from editing this file.
// The theory is that they jump into the middle of the file
// without reading the header.
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
// Library name (the shared object to import from).
if modname == "" {
modname = "libc"
}
// System call name.
if sysname == "" {
sysname = funct
}
// System call pointer variable name.
sysvarname := fmt.Sprintf("proc%s", sysname)
strconvfunc := "BytePtrFromString"
strconvtype := "*byte"
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
// Runtime import of function to allow cross-platform builds.
dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname)
// Link symbol to proc address variable.
linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname)
// Library proc address variable.
vars = append(vars, sysvarname)
// Go function header.
outlist := strings.Join(out, ", ")
if outlist != "" {
outlist = fmt.Sprintf(" (%s)", outlist)
}
if text != "" {
text += "\n"
}
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist)
// Check if err return available
errvar := ""
for _, param := range out {
p := parseParam(param)
if p.Type == "error" {
errvar = p.Name
continue
}
}
// Prepare arguments to Syscall.
var args []string
n := 0
for _, param := range in {
p := parseParam(param)
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
} else if p.Type == "string" && errvar != "" {
text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if p.Type == "string" {
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
n++
} else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil {
// Convert slice into pointer, length.
// Have to be careful not to take address of &a[0] if len == 0:
// pass nil in that case.
text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
n++
} else if p.Type == "int64" && endianness != "" {
if endianness == "big-endian" {
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
}
} else if p.Type == "bool" {
text += fmt.Sprintf("\tvar _p%d uint32\n", n)
text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
n++
} else {
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
}
}
nargs := len(args)
// Determine which form to use; pad args with zeros.
asm := "sysvicall6"
if nonblock != nil {
asm = "rawSysvicall6"
}
if len(args) <= 6 {
for len(args) < 6 {
args = append(args, "0")
}
} else {
fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
os.Exit(1)
}
// Actual call.
arglist := strings.Join(args, ", ")
call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
// Assign return values.
body := ""
ret := []string{"_", "_", "_"}
doErrno := false
for i := 0; i < len(out); i++ {
p := parseParam(out[i])
reg := ""
if p.Name == "err" {
reg = "e1"
ret[2] = reg
doErrno = true
} else {
reg = fmt.Sprintf("r%d", i)
ret[i] = reg
}
if p.Type == "bool" {
reg = fmt.Sprintf("%d != 0", reg)
}
if p.Type == "int64" && endianness != "" {
// 64-bit number in r1:r0 or r0:r1.
if i+2 > len(out) {
fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
os.Exit(1)
}
if endianness == "big-endian" {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
} else {
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
}
ret[i] = fmt.Sprintf("r%d", i)
ret[i+1] = fmt.Sprintf("r%d", i+1)
}
if reg != "e1" {
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
}
}
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
text += fmt.Sprintf("\t%s\n", call)
} else {
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
}
text += body
if doErrno {
text += "\tif e1 != 0 {\n"
text += "\t\terr = e1\n"
text += "\t}\n"
}
text += "\treturn\n"
text += "}\n"
}
if err := s.Err(); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
os.Exit(1)
}
file.Close()
}
imp := ""
if pack != "unix" {
imp = "import \"golang.org/x/sys/unix\"\n"
}
vardecls := "\t" + strings.Join(vars, ",\n\t")
vardecls += " syscallFunc"
fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
}
const srcTemplate = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package %s
import (
"syscall"
"unsafe"
)
%s
%s
%s
var (
%s
)
%s
`

View File

@@ -1,355 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
//
// Build a MIB with each entry being an array containing the level, type and
// a hash that will contain additional entries if the current entry is a node.
// We then walk this MIB and create a flattened sysctl name to OID hash.
package main
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
)
var (
goos, goarch string
)
// cmdLine returns this program's command-line arguments.
func cmdLine() string {
return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags.
func buildTags() string {
return fmt.Sprintf("%s,%s", goarch, goos)
}
// reMatch performs a regular expression match and stores the submatch slice in the value pointed to by m.
func reMatch(re *regexp.Regexp, str string, m *[]string) bool {
*m = re.FindStringSubmatch(str)
if *m != nil {
return true
}
return false
}
type nodeElement struct {
n int
t string
pE *map[string]nodeElement
}
var (
debugEnabled bool
mib map[string]nodeElement
node *map[string]nodeElement
nodeMap map[string]string
sysCtl []string
)
var (
ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`)
ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`)
ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`)
netInetRE = regexp.MustCompile(`^netinet/`)
netInet6RE = regexp.MustCompile(`^netinet6/`)
netRE = regexp.MustCompile(`^net/`)
bracesRE = regexp.MustCompile(`{.*}`)
ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`)
fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`)
)
func debug(s string) {
if debugEnabled {
fmt.Fprintln(os.Stderr, s)
}
}
// Walk the MIB and build a sysctl name to OID mapping.
func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) {
lNode := pNode // local copy of pointer to node
var keys []string
for k := range *lNode {
keys = append(keys, k)
}
sort.Strings(keys)
for _, key := range keys {
nodename := name
if name != "" {
nodename += "."
}
nodename += key
nodeoid := append(oid, (*pNode)[key].n)
if (*pNode)[key].t == `CTLTYPE_NODE` {
if _, ok := nodeMap[nodename]; ok {
lNode = &mib
ctlName := nodeMap[nodename]
for _, part := range strings.Split(ctlName, ".") {
lNode = ((*lNode)[part]).pE
}
} else {
lNode = (*pNode)[key].pE
}
buildSysctl(lNode, nodename, nodeoid)
} else if (*pNode)[key].t != "" {
oidStr := []string{}
for j := range nodeoid {
oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j]))
}
text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n"
sysCtl = append(sysCtl, text)
}
}
}
func main() {
// Get the OS (using GOOS_TARGET if it exists)
goos = os.Getenv("GOOS_TARGET")
if goos == "" {
goos = os.Getenv("GOOS")
}
// Get the architecture (using GOARCH_TARGET if it exists)
goarch = os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check if GOOS and GOARCH environment variables are defined
if goarch == "" || goos == "" {
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
os.Exit(1)
}
mib = make(map[string]nodeElement)
headers := [...]string{
`sys/sysctl.h`,
`sys/socket.h`,
`sys/tty.h`,
`sys/malloc.h`,
`sys/mount.h`,
`sys/namei.h`,
`sys/sem.h`,
`sys/shm.h`,
`sys/vmmeter.h`,
`uvm/uvmexp.h`,
`uvm/uvm_param.h`,
`uvm/uvm_swap_encrypt.h`,
`ddb/db_var.h`,
`net/if.h`,
`net/if_pfsync.h`,
`net/pipex.h`,
`netinet/in.h`,
`netinet/icmp_var.h`,
`netinet/igmp_var.h`,
`netinet/ip_ah.h`,
`netinet/ip_carp.h`,
`netinet/ip_divert.h`,
`netinet/ip_esp.h`,
`netinet/ip_ether.h`,
`netinet/ip_gre.h`,
`netinet/ip_ipcomp.h`,
`netinet/ip_ipip.h`,
`netinet/pim_var.h`,
`netinet/tcp_var.h`,
`netinet/udp_var.h`,
`netinet6/in6.h`,
`netinet6/ip6_divert.h`,
`netinet6/pim6_var.h`,
`netinet/icmp6.h`,
`netmpls/mpls.h`,
}
ctls := [...]string{
`kern`,
`vm`,
`fs`,
`net`,
//debug /* Special handling required */
`hw`,
//machdep /* Arch specific */
`user`,
`ddb`,
//vfs /* Special handling required */
`fs.posix`,
`kern.forkstat`,
`kern.intrcnt`,
`kern.malloc`,
`kern.nchstats`,
`kern.seminfo`,
`kern.shminfo`,
`kern.timecounter`,
`kern.tty`,
`kern.watchdog`,
`net.bpf`,
`net.ifq`,
`net.inet`,
`net.inet.ah`,
`net.inet.carp`,
`net.inet.divert`,
`net.inet.esp`,
`net.inet.etherip`,
`net.inet.gre`,
`net.inet.icmp`,
`net.inet.igmp`,
`net.inet.ip`,
`net.inet.ip.ifq`,
`net.inet.ipcomp`,
`net.inet.ipip`,
`net.inet.mobileip`,
`net.inet.pfsync`,
`net.inet.pim`,
`net.inet.tcp`,
`net.inet.udp`,
`net.inet6`,
`net.inet6.divert`,
`net.inet6.ip6`,
`net.inet6.icmp6`,
`net.inet6.pim6`,
`net.inet6.tcp6`,
`net.inet6.udp6`,
`net.mpls`,
`net.mpls.ifq`,
`net.key`,
`net.pflow`,
`net.pfsync`,
`net.pipex`,
`net.rt`,
`vm.swapencrypt`,
//vfsgenctl /* Special handling required */
}
// Node name "fixups"
ctlMap := map[string]string{
"ipproto": "net.inet",
"net.inet.ipproto": "net.inet",
"net.inet6.ipv6proto": "net.inet6",
"net.inet6.ipv6": "net.inet6.ip6",
"net.inet.icmpv6": "net.inet6.icmp6",
"net.inet6.divert6": "net.inet6.divert",
"net.inet6.tcp6": "net.inet.tcp",
"net.inet6.udp6": "net.inet.udp",
"mpls": "net.mpls",
"swpenc": "vm.swapencrypt",
}
// Node mappings
nodeMap = map[string]string{
"net.inet.ip.ifq": "net.ifq",
"net.inet.pfsync": "net.pfsync",
"net.mpls.ifq": "net.ifq",
}
mCtls := make(map[string]bool)
for _, ctl := range ctls {
mCtls[ctl] = true
}
for _, header := range headers {
debug("Processing " + header)
file, err := os.Open(filepath.Join("/usr/include", header))
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
var sub []string
if reMatch(ctlNames1RE, s.Text(), &sub) ||
reMatch(ctlNames2RE, s.Text(), &sub) ||
reMatch(ctlNames3RE, s.Text(), &sub) {
if sub[1] == `CTL_NAMES` {
// Top level.
node = &mib
} else {
// Node.
nodename := strings.ToLower(sub[2])
ctlName := ""
if reMatch(netInetRE, header, &sub) {
ctlName = "net.inet." + nodename
} else if reMatch(netInet6RE, header, &sub) {
ctlName = "net.inet6." + nodename
} else if reMatch(netRE, header, &sub) {
ctlName = "net." + nodename
} else {
ctlName = nodename
ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`)
}
if val, ok := ctlMap[ctlName]; ok {
ctlName = val
}
if _, ok := mCtls[ctlName]; !ok {
debug("Ignoring " + ctlName + "...")
continue
}
// Walk down from the top of the MIB.
node = &mib
for _, part := range strings.Split(ctlName, ".") {
if _, ok := (*node)[part]; !ok {
debug("Missing node " + part)
(*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}}
}
node = (*node)[part].pE
}
}
// Populate current node with entries.
i := -1
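// i is the position of the current "{...}" row in the table, i.e. the sysctl OID number for that entry.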
for !strings.HasPrefix(s.Text(), "}") {
s.Scan()
if reMatch(bracesRE, s.Text(), &sub) {
i++
}
if !reMatch(ctlTypeRE, s.Text(), &sub) {
continue
}
(*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}}
}
}
}
err = s.Err()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
file.Close()
}
buildSysctl(&mib, "", []int{})
sort.Strings(sysCtl)
text := strings.Join(sysCtl, "")
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
}
const srcTemplate = `// %s
// Code generated by the command above; DO NOT EDIT.
// +build %s
package unix
type mibentry struct {
ctlname string
ctloid []_C_int
}
var sysctlMib = []mibentry {
%s
}
`

View File

@@ -1,190 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Generate system call table for DragonFly, NetBSD,
// FreeBSD, OpenBSD or Darwin from master list
// (for example, /usr/src/sys/kern/syscalls.master or
// sys/syscall.h).
package main
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
)
var (
goos, goarch string
)
// cmdLine returns this program's command-line arguments
func cmdLine() string {
return "go run mksysnum.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags
func buildTags() string {
return fmt.Sprintf("%s,%s", goarch, goos)
}
func checkErr(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
// source string and substring slice for regexp
type re struct {
str string // source string
sub []string // matched sub-string
}
// Match performs regular expression match
func (r *re) Match(exp string) bool {
r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str)
if r.sub != nil {
return true
}
return false
}
// fetchFile fetches a text file from URL
func fetchFile(URL string) io.Reader {
resp, err := http.Get(URL)
checkErr(err)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
checkErr(err)
return strings.NewReader(string(body))
}
// readFile reads a text file from path
func readFile(path string) io.Reader {
file, err := os.Open(path)
checkErr(err)
return file
}
func format(name, num, proto string) string {
name = strings.ToUpper(name)
// There are multiple entries for enosys and nosys, so comment them out.
nm := re{str: name}
if nm.Match(`^SYS_E?NOSYS$`) {
name = fmt.Sprintf("// %s", name)
}
if name == `SYS_SYS_EXIT` {
name = `SYS_EXIT`
}
return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
}
func main() {
// Get the OS (using GOOS_TARGET if it exists)
goos = os.Getenv("GOOS_TARGET")
if goos == "" {
goos = os.Getenv("GOOS")
}
// Get the architecture (using GOARCH_TARGET if it exists)
goarch = os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check if GOOS and GOARCH environment variables are defined
if goarch == "" || goos == "" {
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
os.Exit(1)
}
file := strings.TrimSpace(os.Args[1])
var syscalls io.Reader
if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") {
// Download syscalls.master file
syscalls = fetchFile(file)
} else {
syscalls = readFile(file)
}
var text, line string
s := bufio.NewScanner(syscalls)
for s.Scan() {
t := re{str: line}
if t.Match(`^(.*)\\$`) {
// Handle continuation
line = t.sub[1]
line += strings.TrimLeft(s.Text(), " \t")
} else {
// New line
line = s.Text()
}
t = re{str: line}
if t.Match(`\\$`) {
continue
}
t = re{str: line}
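// Each GOOS uses a different syscalls.master / syscall.h layout, so match per-OS patterns.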
switch goos {
case "dragonfly":
if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) {
num, proto := t.sub[1], t.sub[2]
name := fmt.Sprintf("SYS_%s", t.sub[3])
text += format(name, num, proto)
}
case "freebsd":
if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) {
num, proto := t.sub[1], t.sub[2]
name := fmt.Sprintf("SYS_%s", t.sub[3])
text += format(name, num, proto)
}
case "openbsd":
if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) {
num, proto, name := t.sub[1], t.sub[3], t.sub[4]
text += format(name, num, proto)
}
case "netbsd":
if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) {
num, proto, compat := t.sub[1], t.sub[6], t.sub[8]
name := t.sub[7] + "_" + t.sub[9]
if t.sub[11] != "" {
name = t.sub[7] + "_" + t.sub[11]
}
name = strings.ToUpper(name)
if compat == "" || compat == "13" || compat == "30" || compat == "50" {
text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
}
}
case "darwin":
if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) {
name, num := t.sub[1], t.sub[2]
name = strings.ToUpper(name)
text += fmt.Sprintf(" SYS_%s = %s;\n", name, num)
}
default:
fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos)
os.Exit(1)
}
}
err := s.Err()
checkErr(err)
fmt.Printf(template, cmdLine(), buildTags(), text)
}
const template = `// %s
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build %s
package unix
const(
%s)`

View File

@@ -1,237 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// +build aix
/*
Input to cgo -godefs. See also mkerrors.sh and mkall.sh
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
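// The +godefs map lines above tell cgo -godefs to emit the listed C types as the given Go types.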
package unix
/*
#include <sys/types.h>
#include <sys/time.h>
#include <sys/limits.h>
#include <sys/un.h>
#include <utime.h>
#include <sys/utsname.h>
#include <sys/poll.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/termio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <dirent.h>
#include <fcntl.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
PathMax = C.PATH_MAX
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
type off64 C.off64_t
type off C.off_t
type Mode_t C.mode_t
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
type Timex C.struct_timex
type Time_t C.time_t
type Tms C.struct_tms
type Utimbuf C.struct_utimbuf
type Timezone C.struct_timezone
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit64
type Pid_t C.pid_t
type _Gid_t C.gid_t
type dev_t C.dev_t
// Files
type Stat_t C.struct_stat
type StatxTimestamp C.struct_statx_timestamp
type Statx_t C.struct_statx
type Dirent C.struct_dirent
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Cmsghdr C.struct_cmsghdr
type ICMPv6Filter C.struct_icmp6_filter
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type IPv6MTUInfo C.struct_ip6_mtuinfo
type Linger C.struct_linger
type Msghdr C.struct_msghdr
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
)
type IfMsgHdr C.struct_if_msghdr
// Misc
type FdSet C.fd_set
type Utsname C.struct_utsname
type Ustat_t C.struct_ustat
type Sigset_t C.sigset_t
const (
AT_FDCWD = C.AT_FDCWD
AT_REMOVEDIR = C.AT_REMOVEDIR
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// Terminal handling
type Termios C.struct_termios
type Termio C.struct_termio
type Winsize C.struct_winsize
//poll
type PollFd struct {
Fd int32
Events uint16
Revents uint16
}
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
//flock_t
type Flock_t C.struct_flock64
// Statfs
type Fsid_t C.struct_fsid_t
type Fsid64_t C.struct_fsid64_t
type Statfs_t C.struct_statfs
const RNDGETENTCNT = 0x80045200

View File

@@ -1,283 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define __DARWIN_UNIX03 0
#define KERNEL
#define _DARWIN_USE_64_BIT_INODE
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/message.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat64
type Statfs_t C.struct_statfs64
type Flock_t C.struct_flock
type Fstore_t C.struct_fstore
type Radvisory_t C.struct_radvisory
type Fbootstraptransfer_t C.struct_fbootstraptransfer
type Log2phys_t C.struct_log2phys
type Fsid C.struct_fsid
type Dirent C.struct_dirent
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet4Pktinfo C.struct_in_pktinfo
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfmaMsghdr C.struct_ifma_msghdr
type IfmaMsghdr2 C.struct_ifma_msghdr2
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_REMOVEDIR = C.AT_REMOVEDIR
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// uname
type Utsname C.struct_utsname
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

View File

@@ -1,269 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define KERNEL
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat
type Statfs_t C.struct_statfs
type Flock_t C.struct_flock
type Dirent C.struct_dirent
type Fsid C.struct_fsid
// File system limits
const (
PathMax = C.PATH_MAX
)
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfmaMsghdr C.struct_ifma_msghdr
type IfAnnounceMsghdr C.struct_if_announcemsghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// Uname
type Utsname C.struct_utsname
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

View File

@@ -1,406 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define _WANT_FREEBSD11_STAT 1
#define _WANT_FREEBSD11_STATFS 1
#define _WANT_FREEBSD11_DIRENT 1
#define _WANT_FREEBSD11_KEVENT 1
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/capsicum.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
// This structure is a duplicate of if_data on FreeBSD 8-STABLE.
// See /usr/include/net/if.h.
struct if_data8 {
u_char ifi_type;
u_char ifi_physical;
u_char ifi_addrlen;
u_char ifi_hdrlen;
u_char ifi_link_state;
u_char ifi_spare_char1;
u_char ifi_spare_char2;
u_char ifi_datalen;
u_long ifi_mtu;
u_long ifi_metric;
u_long ifi_baudrate;
u_long ifi_ipackets;
u_long ifi_ierrors;
u_long ifi_opackets;
u_long ifi_oerrors;
u_long ifi_collisions;
u_long ifi_ibytes;
u_long ifi_obytes;
u_long ifi_imcasts;
u_long ifi_omcasts;
u_long ifi_iqdrops;
u_long ifi_noproto;
u_long ifi_hwassist;
// FIXME: these are now unions, so maybe need to change definitions?
#undef ifi_epoch
time_t ifi_epoch;
#undef ifi_lastchange
struct timeval ifi_lastchange;
};
// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE.
// See /usr/include/net/if.h.
struct if_msghdr8 {
u_short ifm_msglen;
u_char ifm_version;
u_char ifm_type;
int ifm_addrs;
int ifm_flags;
u_short ifm_index;
struct if_data8 ifm_data;
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
const (
_statfsVersion = C.STATFS_VERSION
_dirblksiz = C.DIRBLKSIZ
)
type Stat_t C.struct_stat
type stat_freebsd11_t C.struct_freebsd11_stat
type Statfs_t C.struct_statfs
type statfs_freebsd11_t C.struct_freebsd11_statfs
type Flock_t C.struct_flock
type Dirent C.struct_dirent
type dirent_freebsd11 C.struct_freebsd11_dirent
type Fsid C.struct_fsid
// File system limits
const (
PathMax = C.PATH_MAX
)
// Advice to Fadvise
const (
FADV_NORMAL = C.POSIX_FADV_NORMAL
FADV_RANDOM = C.POSIX_FADV_RANDOM
FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
FADV_WILLNEED = C.POSIX_FADV_WILLNEED
FADV_DONTNEED = C.POSIX_FADV_DONTNEED
FADV_NOREUSE = C.POSIX_FADV_NOREUSE
)
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPMreqn C.struct_ip_mreqn
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPMreqn = C.sizeof_struct_ip_mreqn
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_ATTACH = C.PT_ATTACH
PTRACE_CONT = C.PT_CONTINUE
PTRACE_DETACH = C.PT_DETACH
PTRACE_GETFPREGS = C.PT_GETFPREGS
PTRACE_GETFSBASE = C.PT_GETFSBASE
PTRACE_GETLWPLIST = C.PT_GETLWPLIST
PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS
PTRACE_GETREGS = C.PT_GETREGS
PTRACE_GETXSTATE = C.PT_GETXSTATE
PTRACE_IO = C.PT_IO
PTRACE_KILL = C.PT_KILL
PTRACE_LWPEVENTS = C.PT_LWP_EVENTS
PTRACE_LWPINFO = C.PT_LWPINFO
PTRACE_SETFPREGS = C.PT_SETFPREGS
PTRACE_SETREGS = C.PT_SETREGS
PTRACE_SINGLESTEP = C.PT_STEP
PTRACE_TRACEME = C.PT_TRACE_ME
)
const (
PIOD_READ_D = C.PIOD_READ_D
PIOD_WRITE_D = C.PIOD_WRITE_D
PIOD_READ_I = C.PIOD_READ_I
PIOD_WRITE_I = C.PIOD_WRITE_I
)
const (
PL_FLAG_BORN = C.PL_FLAG_BORN
PL_FLAG_EXITED = C.PL_FLAG_EXITED
PL_FLAG_SI = C.PL_FLAG_SI
)
const (
TRAP_BRKPT = C.TRAP_BRKPT
TRAP_TRACE = C.TRAP_TRACE
)
type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo
type __Siginfo C.struct___siginfo
type Sigset_t C.sigset_t
type Reg C.struct_reg
type FpReg C.struct_fpreg
type PtraceIoDesc C.struct_ptrace_io_desc
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent_freebsd11
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
sizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfMsghdr = C.sizeof_struct_if_msghdr8
sizeofIfData = C.sizeof_struct_if_data
SizeofIfData = C.sizeof_struct_if_data8
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type ifMsghdr C.struct_if_msghdr
type IfMsghdr C.struct_if_msghdr8
type ifData C.struct_if_data
type IfData C.struct_if_data8
type IfaMsghdr C.struct_ifa_msghdr
type IfmaMsghdr C.struct_ifma_msghdr
type IfAnnounceMsghdr C.struct_if_announcemsghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfZbuf C.struct_bpf_zbuf
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
type BpfZbufHeader C.struct_bpf_zbuf_header
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_REMOVEDIR = C.AT_REMOVEDIR
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLINIGNEOF = C.POLLINIGNEOF
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// Capabilities
type CapRights C.struct_cap_rights
// Uname
type Utsname C.struct_utsname
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

View File

@@ -1,300 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define KERNEL
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat
type Statfs_t C.struct_statfs
type Statvfs_t C.struct_statvfs
type Flock_t C.struct_flock
type Dirent C.struct_dirent
type Fsid C.fsid_t
// File system limits
const (
PathMax = C.PATH_MAX
)
// Fstatvfs/Statvfs flags
const (
ST_WAIT = C.ST_WAIT
ST_NOWAIT = C.ST_NOWAIT
)
// Advice to Fadvise
const (
FADV_NORMAL = C.POSIX_FADV_NORMAL
FADV_RANDOM = C.POSIX_FADV_RANDOM
FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
FADV_WILLNEED = C.POSIX_FADV_WILLNEED
FADV_DONTNEED = C.POSIX_FADV_DONTNEED
FADV_NOREUSE = C.POSIX_FADV_NOREUSE
)
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfAnnounceMsghdr C.struct_if_announcemsghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
type Mclpool C.struct_mclpool
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
type BpfTimeval C.struct_bpf_timeval
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
type Ptmget C.struct_ptmget
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// Sysctl
type Sysctlnode C.struct_sysctlnode
// Uname
type Utsname C.struct_utsname
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

View File

@@ -1,283 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define KERNEL
#include <dirent.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <uvm/uvmexp.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat
type Statfs_t C.struct_statfs
type Flock_t C.struct_flock
type Dirent C.struct_dirent
type Fsid C.fsid_t
// File system limits
const (
PathMax = C.PATH_MAX
)
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
)
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent
// Select
type FdSet C.fd_set
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type IfAnnounceMsghdr C.struct_if_announcemsghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
type Mclpool C.struct_mclpool
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfHdr C.struct_bpf_hdr
type BpfTimeval C.struct_bpf_timeval
// Terminal handling
type Termios C.struct_termios
type Winsize C.struct_winsize
// fchmodat-like syscalls.
const (
AT_FDCWD = C.AT_FDCWD
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)
// Signal Sets
type Sigset_t C.sigset_t
// Uname
type Utsname C.struct_utsname
// Uvmexp
const SizeofUvmexp = C.sizeof_struct_uvmexp
type Uvmexp C.struct_uvmexp
// Clockinfo
const SizeofClockinfo = C.sizeof_struct_clockinfo
type Clockinfo C.struct_clockinfo

View File

@@ -1,269 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
Input to cgo -godefs. See README.md
*/
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
package unix
/*
#define KERNEL
// These defines ensure that builds done on newer versions of Solaris are
// backwards-compatible with older versions of Solaris and
// OpenSolaris-based derivatives.
#define __USE_SUNOS_SOCKETS__ // msghdr
#define __USE_LEGACY_PROTOTYPES__ // iovec
#include <dirent.h>
#include <fcntl.h>
#include <netdb.h>
#include <limits.h>
#include <poll.h>
#include <signal.h>
#include <termios.h>
#include <termio.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/tcp.h>
#include <ustat.h>
#include <utime.h>
enum {
sizeofPtr = sizeof(void*),
};
union sockaddr_all {
struct sockaddr s1; // this one gets used for fields
struct sockaddr_in s2; // these pad it out
struct sockaddr_in6 s3;
struct sockaddr_un s4;
struct sockaddr_dl s5;
};
struct sockaddr_any {
struct sockaddr addr;
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
};
*/
import "C"
// Machine characteristics
const (
SizeofPtr = C.sizeofPtr
SizeofShort = C.sizeof_short
SizeofInt = C.sizeof_int
SizeofLong = C.sizeof_long
SizeofLongLong = C.sizeof_longlong
PathMax = C.PATH_MAX
MaxHostNameLen = C.MAXHOSTNAMELEN
)
// Basic types
type (
_C_short C.short
_C_int C.int
_C_long C.long
_C_long_long C.longlong
)
// Time
type Timespec C.struct_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
type Tms C.struct_tms
type Utimbuf C.struct_utimbuf
// Processes
type Rusage C.struct_rusage
type Rlimit C.struct_rlimit
type _Gid_t C.gid_t
// Files
type Stat_t C.struct_stat
type Flock_t C.struct_flock
type Dirent C.struct_dirent
// Filesystems
type _Fsblkcnt_t C.fsblkcnt_t
type Statvfs_t C.struct_statvfs
// Sockets
type RawSockaddrInet4 C.struct_sockaddr_in
type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
type _Socklen C.socklen_t
type Linger C.struct_linger
type Iovec C.struct_iovec
type IPMreq C.struct_ip_mreq
type IPv6Mreq C.struct_ipv6_mreq
type Msghdr C.struct_msghdr
type Cmsghdr C.struct_cmsghdr
type Inet4Pktinfo C.struct_in_pktinfo
type Inet6Pktinfo C.struct_in6_pktinfo
type IPv6MTUInfo C.struct_ip6_mtuinfo
type ICMPv6Filter C.struct_icmp6_filter
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Select
type FdSet C.fd_set
// Misc
type Utsname C.struct_utsname
type Ustat_t C.struct_ustat
const (
AT_FDCWD = C.AT_FDCWD
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_REMOVEDIR = C.AT_REMOVEDIR
AT_EACCESS = C.AT_EACCESS
)
// Routing and interface messages
const (
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
SizeofIfData = C.sizeof_struct_if_data
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
SizeofRtMetrics = C.sizeof_struct_rt_metrics
)
type IfMsghdr C.struct_if_msghdr
type IfData C.struct_if_data
type IfaMsghdr C.struct_ifa_msghdr
type RtMsghdr C.struct_rt_msghdr
type RtMetrics C.struct_rt_metrics
// Berkeley packet filter
const (
SizeofBpfVersion = C.sizeof_struct_bpf_version
SizeofBpfStat = C.sizeof_struct_bpf_stat
SizeofBpfProgram = C.sizeof_struct_bpf_program
SizeofBpfInsn = C.sizeof_struct_bpf_insn
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
)
type BpfVersion C.struct_bpf_version
type BpfStat C.struct_bpf_stat
type BpfProgram C.struct_bpf_program
type BpfInsn C.struct_bpf_insn
type BpfTimeval C.struct_bpf_timeval
type BpfHdr C.struct_bpf_hdr
// Terminal handling
type Termios C.struct_termios
type Termio C.struct_termio
type Winsize C.struct_winsize
// poll
type PollFd C.struct_pollfd
const (
POLLERR = C.POLLERR
POLLHUP = C.POLLHUP
POLLIN = C.POLLIN
POLLNVAL = C.POLLNVAL
POLLOUT = C.POLLOUT
POLLPRI = C.POLLPRI
POLLRDBAND = C.POLLRDBAND
POLLRDNORM = C.POLLRDNORM
POLLWRBAND = C.POLLWRBAND
POLLWRNORM = C.POLLWRNORM
)

View File

@@ -1,142 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"log"
"strings"
"golang.org/x/text/internal/gen"
)
type registry struct {
XMLName xml.Name `xml:"registry"`
Updated string `xml:"updated"`
Registry []struct {
ID string `xml:"id,attr"`
Record []struct {
Name string `xml:"name"`
Xref []struct {
Type string `xml:"type,attr"`
Data string `xml:"data,attr"`
} `xml:"xref"`
Desc struct {
Data string `xml:",innerxml"`
// Any []struct {
// Data string `xml:",chardata"`
// } `xml:",any"`
// Data string `xml:",chardata"`
} `xml:"description,"`
MIB string `xml:"value"`
Alias []string `xml:"alias"`
MIME string `xml:"preferred_alias"`
} `xml:"record"`
} `xml:"registry"`
}
func main() {
r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
reg := &registry{}
if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
log.Fatalf("Error decoding charset registry: %v", err)
}
if len(reg.Registry) == 0 {
log.Fatal("Empty registry")
}
if reg.Registry[0].ID != "character-sets-1" {
log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
}
w := &bytes.Buffer{}
fmt.Fprintf(w, "const (\n")
for _, rec := range reg.Registry[0].Record {
constName := ""
for _, a := range rec.Alias {
if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
// Some of the constant definitions have comments in them. Strip those.
constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
}
}
if constName == "" {
switch rec.MIB {
case "2085":
constName = "HZGB2312" // Not listed as alias for some reason.
default:
log.Fatalf("No cs alias defined for %s.", rec.MIB)
}
}
if rec.MIME != "" {
rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
}
fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
if len(rec.Desc.Data) > 0 {
fmt.Fprint(w, "// ")
d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
inElem := true
attr := ""
for {
t, err := d.Token()
if err != nil {
if err != io.EOF {
log.Fatal(err)
}
break
}
switch x := t.(type) {
case xml.CharData:
attr = "" // Don't need attribute info.
a := bytes.Split([]byte(x), []byte("\n"))
for i, b := range a {
if b = bytes.TrimSpace(b); len(b) != 0 {
if !inElem && i > 0 {
fmt.Fprint(w, "\n// ")
}
inElem = false
fmt.Fprintf(w, "%s ", string(b))
}
}
case xml.StartElement:
if x.Name.Local == "xref" {
inElem = true
use := false
for _, a := range x.Attr {
if a.Name.Local == "type" {
use = use || a.Value != "person"
}
if a.Name.Local == "data" && use {
// Patch up URLs to use https. From some links, the
// https version is different from the http one.
s := a.Value
s = strings.Replace(s, "http://", "https://", -1)
s = strings.Replace(s, "/unicode/", "/", -1)
attr = s + " "
}
}
}
case xml.EndElement:
inElem = false
fmt.Fprint(w, attr)
}
}
fmt.Fprint(w, "\n")
}
for _, x := range rec.Xref {
switch x.Type {
case "rfc":
fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
case "uri":
fmt.Fprintf(w, "// Reference: %s\n", x.Data)
}
}
fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
fmt.Fprintln(w)
}
fmt.Fprintln(w, ")")
gen.WriteGoFile("mib.go", "identifier", w.Bytes())
}

View File

@@ -1,133 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"flag"
"log"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
var outputFile = flag.String("out", "tables.go", "output file")
func main() {
gen.Init()
gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")
genTables()
}
// bidiClass names and codes taken from class "bc" in
// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
var bidiClass = map[string]Class{
"AL": AL, // ArabicLetter
"AN": AN, // ArabicNumber
"B": B, // ParagraphSeparator
"BN": BN, // BoundaryNeutral
"CS": CS, // CommonSeparator
"EN": EN, // EuropeanNumber
"ES": ES, // EuropeanSeparator
"ET": ET, // EuropeanTerminator
"L": L, // LeftToRight
"NSM": NSM, // NonspacingMark
"ON": ON, // OtherNeutral
"R": R, // RightToLeft
"S": S, // SegmentSeparator
"WS": WS, // WhiteSpace
"FSI": Control,
"PDF": Control,
"PDI": Control,
"LRE": Control,
"LRI": Control,
"LRO": Control,
"RLE": Control,
"RLI": Control,
"RLO": Control,
}
func genTables() {
if numClass > 0x0F {
log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
}
w := gen.NewCodeWriter()
defer w.WriteVersionedGoFile(*outputFile, "bidi")
gen.WriteUnicodeVersion(w)
t := triegen.NewTrie("bidi")
// Build data about bracket mapping. These bits need to be or-ed with
// any other bits.
orMask := map[rune]uint64{}
xorMap := map[rune]int{}
xorMasks := []rune{0} // First value is no-op.
ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
r1 := p.Rune(0)
r2 := p.Rune(1)
xor := r1 ^ r2
if _, ok := xorMap[xor]; !ok {
xorMap[xor] = len(xorMasks)
xorMasks = append(xorMasks, xor)
}
entry := uint64(xorMap[xor]) << xorMaskShift
switch p.String(2) {
case "o":
entry |= openMask
case "c", "n":
default:
log.Fatalf("Unknown bracket class %q.", p.String(2))
}
orMask[r1] = entry
})
w.WriteComment(`
xorMasks contains masks to be xor-ed with brackets to get the reverse
version.`)
w.WriteVar("xorMasks", xorMasks)
done := map[rune]bool{}
insert := func(r rune, c Class) {
if !done[r] {
t.Insert(r, orMask[r]|uint64(c))
done[r] = true
}
}
// Insert the derived BiDi properties.
ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
r := p.Rune(0)
class, ok := bidiClass[p.String(1)]
if !ok {
log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
}
insert(r, class)
})
visitDefaults(insert)
// TODO: use sparse blocks. This would reduce table size considerably
// from the looks of it.
sz, err := t.Gen(w)
if err != nil {
log.Fatal(err)
}
w.Size += sz
}
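As an aside on the bracket handling above: each pair in BidiBrackets.txt is stored via the XOR of its two code points, so a single mask index in the trie is enough to map an opening bracket to its closing counterpart and back. A minimal illustrative check (exampleBracketXor is an assumption for this sketch, not part of the original file):
// exampleBracketXor shows the idea behind xorMasks: '(' (U+0028) and ')'
// (U+0029) differ in exactly one bit, so XOR-ing either with the pair's
// mask yields the other.
func exampleBracketXor() bool {
	mask := rune('(') ^ rune(')') // 0x1 for this pair
	return rune('(')^mask == ')' && rune(')')^mask == '('
}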
// Dummy values to make methods in gen_common compile. The real versions
// will be generated by this file into tables.go.
var (
xorMasks []rune
)

View File

@@ -1,57 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/ucd"
"golang.org/x/text/unicode/rangetable"
)
// These tables are hand-extracted from:
// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
func visitDefaults(fn func(r rune, c Class)) {
// first write default values for ranges listed above.
visitRunes(fn, AL, []rune{
0x0600, 0x07BF, // Arabic
0x08A0, 0x08FF, // Arabic Extended-A
0xFB50, 0xFDCF, // Arabic Presentation Forms
0xFDF0, 0xFDFF,
0xFE70, 0xFEFF,
0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
})
visitRunes(fn, R, []rune{
0x0590, 0x05FF, // Hebrew
0x07C0, 0x089F, // Nko et al.
0xFB1D, 0xFB4F,
0x00010800, 0x00010FFF, // Cypriot Syllabary et al.
0x0001E800, 0x0001EDFF,
0x0001EF00, 0x0001EFFF,
})
visitRunes(fn, ET, []rune{ // European Terminator
0x20A0, 0x20CF, // Currency symbols
})
rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
fn(r, BN) // Boundary Neutral
})
ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
if p.String(1) == "Default_Ignorable_Code_Point" {
fn(p.Rune(0), BN) // Boundary Neutral
}
})
}
func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
for i := 0; i < len(runes); i += 2 {
lo, hi := runes[i], runes[i+1]
for j := lo; j <= hi; j++ {
fn(j, c)
}
}
}

View File

@@ -1,64 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint
const (
L Class = iota // LeftToRight
R // RightToLeft
EN // EuropeanNumber
ES // EuropeanSeparator
ET // EuropeanTerminator
AN // ArabicNumber
CS // CommonSeparator
B // ParagraphSeparator
S // SegmentSeparator
WS // WhiteSpace
ON // OtherNeutral
BN // BoundaryNeutral
NSM // NonspacingMark
AL // ArabicLetter
Control // Control LRO - PDI
numClass
LRO // LeftToRightOverride
RLO // RightToLeftOverride
LRE // LeftToRightEmbedding
RLE // RightToLeftEmbedding
PDF // PopDirectionalFormat
LRI // LeftToRightIsolate
RLI // RightToLeftIsolate
FSI // FirstStrongIsolate
PDI // PopDirectionalIsolate
unknownClass = ^Class(0)
)
var controlToClass = map[rune]Class{
0x202D: LRO, // LeftToRightOverride,
0x202E: RLO, // RightToLeftOverride,
0x202A: LRE, // LeftToRightEmbedding,
0x202B: RLE, // RightToLeftEmbedding,
0x202C: PDF, // PopDirectionalFormat,
0x2066: LRI, // LeftToRightIsolate,
0x2067: RLI, // RightToLeftIsolate,
0x2068: FSI, // FirstStrongIsolate,
0x2069: PDI, // PopDirectionalIsolate,
}
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4 1: Bracket open, 0: Bracket close
// 3..0 Class type
const (
openMask = 0x10
xorMaskShift = 5
)
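For illustration, a minimal sketch of how an entry with this layout could be unpacked; unpackEntry is not part of the original file and only mirrors the bit assignments documented above:
// unpackEntry splits a trie entry into its XOR-mask index (bits 7..5),
// bracket-open flag (bit 4), and BiDi class (bits 3..0).
func unpackEntry(e uint64) (xorIdx int, open bool, c Class) {
	xorIdx = int((e >> xorMaskShift) & 0x7)
	open = e&openMask != 0
	c = Class(e & 0x0F)
	return xorIdx, open, c
}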

View File

@@ -1,986 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Normalization table generator.
// Data read from the web.
// See forminfo.go for a description of the trie values associated with each rune.
package main
import (
"bytes"
"encoding/binary"
"flag"
"fmt"
"io"
"log"
"sort"
"strconv"
"strings"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
func main() {
gen.Init()
loadUnicodeData()
compactCCC()
loadCompositionExclusions()
completeCharFields(FCanonical)
completeCharFields(FCompatibility)
computeNonStarterCounts()
verifyComputed()
printChars()
testDerived()
printTestdata()
makeTables()
}
var (
tablelist = flag.String("tables",
"all",
"comma-separated list of which tables to generate; "+
"can be 'decomp', 'recomp', 'info' and 'all'")
test = flag.Bool("test",
false,
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
verbose = flag.Bool("verbose",
false,
"write data to stdout as it is parsed")
)
const MaxChar = 0x10FFFF // anything above this shouldn't exist
// Quick Check properties of runes allow us to quickly
// determine whether a rune may occur in a normal form.
// For a given normal form, a rune may be guaranteed to occur
// verbatim (QC=Yes), may or may not combine with another
// rune (QC=Maybe), or may not occur (QC=No).
type QCResult int
const (
QCUnknown QCResult = iota
QCYes
QCNo
QCMaybe
)
func (r QCResult) String() string {
switch r {
case QCYes:
return "Yes"
case QCNo:
return "No"
case QCMaybe:
return "Maybe"
}
return "***UNKNOWN***"
}
const (
FCanonical = iota // NFC or NFD
FCompatibility // NFKC or NFKD
FNumberOfFormTypes
)
const (
MComposed = iota // NFC or NFKC
MDecomposed // NFD or NFKD
MNumberOfModes
)
// This contains only the properties we're interested in.
type Char struct {
name string
codePoint rune // if zero, this index is not a valid code point.
ccc uint8 // canonical combining class
origCCC uint8
excludeInComp bool // from CompositionExclusions.txt
compatDecomp bool // it has a compatibility expansion
nTrailingNonStarters uint8
nLeadingNonStarters uint8 // must be equal to trailing if non-zero
forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
state State
}
var chars = make([]Char, MaxChar+1)
var cccMap = make(map[uint8]uint8)
func (c Char) String() string {
buf := new(bytes.Buffer)
fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
fmt.Fprintf(buf, " state: %v\n", c.state)
fmt.Fprintf(buf, " NFC:\n")
fmt.Fprint(buf, c.forms[FCanonical])
fmt.Fprintf(buf, " NFKC:\n")
fmt.Fprint(buf, c.forms[FCompatibility])
return buf.String()
}
// In UnicodeData.txt, some ranges are marked like this:
// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
// parseCharacter keeps a state variable indicating the weirdness.
type State int
const (
SNormal State = iota // known to be zero for the type
SFirst
SLast
SMissing
)
var lastChar = rune('\u0000')
func (c Char) isValid() bool {
return c.codePoint != 0 && c.state != SMissing
}
type FormInfo struct {
quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
verified [MNumberOfModes]bool // index: MComposed or MDecomposed
combinesForward bool // May combine with rune on the right
combinesBackward bool // May combine with rune on the left
isOneWay bool // Never appears in result
inDecomp bool // Some decompositions result in this char.
decomp Decomposition
expandedDecomp Decomposition
}
func (f FormInfo) String() string {
buf := bytes.NewBuffer(make([]byte, 0))
fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
return buf.String()
}
type Decomposition []rune
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
decomp := strings.Split(s, " ")
if len(decomp) > 0 && skipfirst {
decomp = decomp[1:]
}
for _, d := range decomp {
point, err := strconv.ParseUint(d, 16, 64)
if err != nil {
return a, err
}
a = append(a, rune(point))
}
return a, nil
}
func loadUnicodeData() {
f := gen.OpenUCDFile("UnicodeData.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(ucd.CodePoint)
char := &chars[r]
char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
decmap := p.String(ucd.DecompMapping)
exp, err := parseDecomposition(decmap, false)
isCompat := false
if err != nil {
if len(decmap) > 0 {
exp, err = parseDecomposition(decmap, true)
if err != nil {
log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
}
isCompat = true
}
}
char.name = p.String(ucd.Name)
char.codePoint = r
char.forms[FCompatibility].decomp = exp
if !isCompat {
char.forms[FCanonical].decomp = exp
} else {
char.compatDecomp = true
}
if len(decmap) > 0 {
char.forms[FCompatibility].decomp = exp
}
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
}
// compactCCC converts the sparse set of CCC values to a contiguous one,
// reducing the number of bits needed from 8 to 6.
func compactCCC() {
m := make(map[uint8]uint8)
for i := range chars {
c := &chars[i]
m[c.ccc] = 0
}
cccs := []int{}
for v := range m {
cccs = append(cccs, int(v))
}
sort.Ints(cccs)
for i, c := range cccs {
cccMap[uint8(i)] = uint8(c)
m[uint8(c)] = uint8(i)
}
for i := range chars {
c := &chars[i]
c.origCCC = c.ccc
c.ccc = m[c.ccc]
}
if len(m) >= 1<<6 {
log.Fatalf("too many difference CCC values: %d >= 64", len(m))
}
}
// CompositionExclusions.txt has form:
// 0958 # ...
// See https://unicode.org/reports/tr44/ for full explanation
func loadCompositionExclusions() {
f := gen.OpenUCDFile("CompositionExclusions.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
c := &chars[p.Rune(0)]
if c.excludeInComp {
log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
}
c.excludeInComp = true
}
if e := p.Err(); e != nil {
log.Fatal(e)
}
}
// hasCompatDecomp returns true if any of the recursive
// decompositions contains a compatibility expansion.
// In this case, the character may not occur in NFK*.
func hasCompatDecomp(r rune) bool {
c := &chars[r]
if c.compatDecomp {
return true
}
for _, d := range c.forms[FCompatibility].decomp {
if hasCompatDecomp(d) {
return true
}
}
return false
}
// Hangul related constants.
const (
HangulBase = 0xAC00
HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
JamoLBase = 0x1100
JamoLEnd = 0x1113
JamoVBase = 0x1161
JamoVEnd = 0x1176
JamoTBase = 0x11A8
JamoTEnd = 0x11C3
JamoLVTCount = 19 * 21 * 28
JamoTCount = 28
)
func isHangul(r rune) bool {
return HangulBase <= r && r < HangulEnd
}
func isHangulWithoutJamoT(r rune) bool {
if !isHangul(r) {
return false
}
r -= HangulBase
return r < JamoLVTCount && r%JamoTCount == 0
}
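The modulo test above follows from the standard Hangul composition arithmetic, syllable = HangulBase + (L*21+V)*28 + T, where T == 0 means there is no trailing jamo. A small sketch of that formula (jamoIndices is illustrative and not part of the original file):
// jamoIndices decomposes a precomposed Hangul syllable into its leading,
// vowel, and trailing jamo indices; a trailing index of 0 is exactly the
// case isHangulWithoutJamoT detects.
func jamoIndices(r rune) (l, v, t int) {
	s := int(r - HangulBase)
	return s / (21 * 28), (s / 28) % 21, s % 28
}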
func ccc(r rune) uint8 {
return chars[r].ccc
}
// Insert a rune in a buffer, ordered by Canonical Combining Class.
func insertOrdered(b Decomposition, r rune) Decomposition {
n := len(b)
b = append(b, 0)
cc := ccc(r)
if cc > 0 {
// Use bubble sort.
for ; n > 0; n-- {
if ccc(b[n-1]) <= cc {
break
}
b[n] = b[n-1]
}
}
b[n] = r
return b
}
// Recursively decompose.
func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
dcomp := chars[r].forms[form].decomp
if len(dcomp) == 0 {
return insertOrdered(d, r)
}
for _, c := range dcomp {
d = decomposeRecursive(form, c, d)
}
return d
}
func completeCharFields(form int) {
// Phase 0: pre-expand decomposition.
for i := range chars {
f := &chars[i].forms[form]
if len(f.decomp) == 0 {
continue
}
exp := make(Decomposition, 0)
for _, c := range f.decomp {
exp = decomposeRecursive(form, c, exp)
}
f.expandedDecomp = exp
}
// Phase 1: composition exclusion, mark decomposition.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
// Marks script-specific exclusions and version restricted.
f.isOneWay = c.excludeInComp
// Singletons
f.isOneWay = f.isOneWay || len(f.decomp) == 1
// Non-starter decompositions
if len(f.decomp) > 1 {
chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
f.isOneWay = f.isOneWay || chk
}
// Runes that decompose into more than two runes.
f.isOneWay = f.isOneWay || len(f.decomp) > 2
if form == FCompatibility {
f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
}
for _, r := range f.decomp {
chars[r].forms[form].inDecomp = true
}
}
// Phase 2: forward and backward combining.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
if !f.isOneWay && len(f.decomp) == 2 {
f0 := &chars[f.decomp[0]].forms[form]
f1 := &chars[f.decomp[1]].forms[form]
if !f0.isOneWay {
f0.combinesForward = true
}
if !f1.isOneWay {
f1.combinesBackward = true
}
}
if isHangulWithoutJamoT(rune(i)) {
f.combinesForward = true
}
}
// Phase 3: quick check values.
for i := range chars {
c := &chars[i]
f := &c.forms[form]
switch {
case len(f.decomp) > 0:
f.quickCheck[MDecomposed] = QCNo
case isHangul(rune(i)):
f.quickCheck[MDecomposed] = QCNo
default:
f.quickCheck[MDecomposed] = QCYes
}
switch {
case f.isOneWay:
f.quickCheck[MComposed] = QCNo
case (i & 0xffff00) == JamoLBase:
f.quickCheck[MComposed] = QCYes
if JamoLBase <= i && i < JamoLEnd {
f.combinesForward = true
}
if JamoVBase <= i && i < JamoVEnd {
f.quickCheck[MComposed] = QCMaybe
f.combinesBackward = true
f.combinesForward = true
}
if JamoTBase <= i && i < JamoTEnd {
f.quickCheck[MComposed] = QCMaybe
f.combinesBackward = true
}
case !f.combinesBackward:
f.quickCheck[MComposed] = QCYes
default:
f.quickCheck[MComposed] = QCMaybe
}
}
}
func computeNonStarterCounts() {
// Phase 4: leading and trailing non-starter count
for i := range chars {
c := &chars[i]
runes := []rune{rune(i)}
// We always use FCompatibility so that the CGJ insertion points do not
// change for repeated normalizations with different forms.
if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
runes = exp
}
// We consider runes that combine backwards to be non-starters for the
// purpose of Stream-Safe Text Processing.
for _, r := range runes {
if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
break
}
c.nLeadingNonStarters++
}
for i := len(runes) - 1; i >= 0; i-- {
if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
break
}
c.nTrailingNonStarters++
}
if c.nTrailingNonStarters > 3 {
log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
}
if isHangul(rune(i)) {
c.nTrailingNonStarters = 2
if isHangulWithoutJamoT(rune(i)) {
c.nTrailingNonStarters = 1
}
}
if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
}
if t := c.nTrailingNonStarters; t > 3 {
log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
}
}
}
func printBytes(w io.Writer, b []byte, name string) {
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
fmt.Fprintf(w, "var %s = [...]byte {", name)
for i, c := range b {
switch {
case i%64 == 0:
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
case i%8 == 0:
fmt.Fprintf(w, "\n")
}
fmt.Fprintf(w, "0x%.2X, ", c)
}
fmt.Fprint(w, "\n}\n\n")
}
// See forminfo.go for format.
func makeEntry(f *FormInfo, c *Char) uint16 {
e := uint16(0)
if r := c.codePoint; HangulBase <= r && r < HangulEnd {
e |= 0x40
}
if f.combinesForward {
e |= 0x20
}
if f.quickCheck[MDecomposed] == QCNo {
e |= 0x4
}
switch f.quickCheck[MComposed] {
case QCYes:
case QCNo:
e |= 0x10
case QCMaybe:
e |= 0x18
default:
log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
}
e |= uint16(c.nTrailingNonStarters)
return e
}
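Read back the other way, the same bit assignments decode as follows; unpackInfo is a sketch that simply mirrors makeEntry above and is not part of the original file (the authoritative decoding lives in forminfo.go):
// unpackInfo decodes the flags packed by makeEntry: bit 6 marks Hangul,
// bit 5 combinesForward, bit 2 a "No" decomposed quick check, bits 4..3
// the composed quick check, and the low bits the trailing non-starter count.
func unpackInfo(e uint16) (hangul, combinesForward, decompNo bool, composed QCResult, nTrail uint8) {
	hangul = e&0x40 != 0
	combinesForward = e&0x20 != 0
	decompNo = e&0x4 != 0
	switch e & 0x18 {
	case 0x10:
		composed = QCNo
	case 0x18:
		composed = QCMaybe
	default:
		composed = QCYes
	}
	nTrail = uint8(e & 0x3)
	return
}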
// decompSet keeps track of unique decompositions, grouped by whether
// the decomposition is followed by a trailing and/or leading CCC.
type decompSet [7]map[string]bool
const (
normalDecomp = iota
firstMulti
firstCCC
endMulti
firstLeadingCCC
firstCCCZeroExcept
firstStarterWithNLead
lastDecomp
)
var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
func makeDecompSet() decompSet {
m := decompSet{}
for i := range m {
m[i] = make(map[string]bool)
}
return m
}
func (m *decompSet) insert(key int, s string) {
m[key][s] = true
}
func printCharInfoTables(w io.Writer) int {
mkstr := func(r rune, f *FormInfo) (int, string) {
d := f.expandedDecomp
s := string([]rune(d))
if max := 1 << 6; len(s) >= max {
const msg = "%U: too many bytes in decomposition: %d >= %d"
log.Fatalf(msg, r, len(s), max)
}
head := uint8(len(s))
if f.quickCheck[MComposed] != QCYes {
head |= 0x40
}
if f.combinesForward {
head |= 0x80
}
s = string([]byte{head}) + s
lccc := ccc(d[0])
tccc := ccc(d[len(d)-1])
cc := ccc(r)
if cc != 0 && lccc == 0 && tccc == 0 {
log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
}
if tccc < lccc && lccc != 0 {
const msg = "%U: lccc (%d) must be <= tcc (%d)"
log.Fatalf(msg, r, lccc, tccc)
}
index := normalDecomp
nTrail := chars[r].nTrailingNonStarters
nLead := chars[r].nLeadingNonStarters
if tccc > 0 || lccc > 0 || nTrail > 0 {
tccc <<= 2
tccc |= nTrail
s += string([]byte{tccc})
index = endMulti
for _, r := range d[1:] {
if ccc(r) == 0 {
index = firstCCC
}
}
if lccc > 0 || nLead > 0 {
s += string([]byte{lccc})
if index == firstCCC {
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
}
index = firstLeadingCCC
}
if cc != lccc {
if cc != 0 {
log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
}
index = firstCCCZeroExcept
}
} else if len(d) > 1 {
index = firstMulti
}
return index, s
}
decompSet := makeDecompSet()
const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
decompSet.insert(firstStarterWithNLead, nLeadStr)
// Store the uniqued decompositions in a byte buffer,
// preceded by their byte length.
for _, c := range chars {
for _, f := range c.forms {
if len(f.expandedDecomp) == 0 {
continue
}
if f.combinesBackward {
log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
}
index, s := mkstr(c.codePoint, &f)
decompSet.insert(index, s)
}
}
decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
size := 0
positionMap := make(map[string]uint16)
decompositions.WriteString("\000")
fmt.Fprintln(w, "const (")
for i, m := range decompSet {
sa := []string{}
for s := range m {
sa = append(sa, s)
}
sort.Strings(sa)
for _, s := range sa {
p := decompositions.Len()
decompositions.WriteString(s)
positionMap[s] = uint16(p)
}
if cname[i] != "" {
fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
}
}
fmt.Fprintln(w, "maxDecomp = 0x8000")
fmt.Fprintln(w, ")")
b := decompositions.Bytes()
printBytes(w, b, "decomps")
size += len(b)
varnames := []string{"nfc", "nfkc"}
for i := 0; i < FNumberOfFormTypes; i++ {
trie := triegen.NewTrie(varnames[i])
for r, c := range chars {
f := c.forms[i]
d := f.expandedDecomp
if len(d) != 0 {
_, key := mkstr(c.codePoint, &f)
trie.Insert(rune(r), uint64(positionMap[key]))
if c.ccc != ccc(d[0]) {
// We assume the lead ccc of a decomposition !=0 in this case.
if ccc(d[0]) == 0 {
log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
}
}
} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
// Handle cases where it can't be detected that the nLead should be equal
// to nTrail.
trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
trie.Insert(c.codePoint, uint64(0x8000|v))
}
}
sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
if err != nil {
log.Fatal(err)
}
size += sz
}
return size
}
func contains(sa []string, s string) bool {
for _, a := range sa {
if a == s {
return true
}
}
return false
}
func makeTables() {
w := &bytes.Buffer{}
size := 0
if *tablelist == "" {
return
}
list := strings.Split(*tablelist, ",")
if *tablelist == "all" {
list = []string{"recomp", "info"}
}
// Compute maximum decomposition size.
max := 0
for _, c := range chars {
if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
max = n
}
}
fmt.Fprintln(w, `import "sync"`)
fmt.Fprintln(w)
fmt.Fprintln(w, "const (")
fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
fmt.Fprintln(w)
fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
fmt.Fprintln(w, ")\n")
// Print the CCC remap table.
size += len(cccMap)
fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
for i := 0; i < len(cccMap); i++ {
if i%8 == 0 {
fmt.Fprintln(w)
}
fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
}
fmt.Fprintln(w, "\n}\n")
if contains(list, "info") {
size += printCharInfoTables(w)
}
if contains(list, "recomp") {
// Note that we use 32 bit keys, instead of 64 bit.
// This clips the bits of three entries, but we know
// this won't cause a collision. The compiler will catch
// any changes made to UnicodeData.txt that introduces
// a collision.
// Note that the recomposition map for NFC and NFKC
// are identical.
// Recomposition map
nrentries := 0
for _, c := range chars {
f := c.forms[FCanonical]
if !f.isOneWay && len(f.decomp) > 0 {
nrentries++
}
}
sz := nrentries * 8
size += sz
fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
fmt.Fprintln(w, "var recompMap map[uint32]rune")
fmt.Fprintln(w, "var recompMapOnce sync.Once\n")
fmt.Fprintln(w, `const recompMapPacked = "" +`)
var buf [8]byte
for i, c := range chars {
f := c.forms[FCanonical]
d := f.decomp
if !f.isOneWay && len(d) > 0 {
key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
binary.BigEndian.PutUint32(buf[:4], key)
binary.BigEndian.PutUint32(buf[4:], uint32(i))
fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i))
}
}
// hack so we don't have to special case the trailing plus sign
fmt.Fprintf(w, ` ""`)
fmt.Fprintln(w)
}
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
}
func printChars() {
if *verbose {
for _, c := range chars {
if !c.isValid() || c.state == SMissing {
continue
}
fmt.Println(c)
}
}
}
// verifyComputed does various consistency tests.
func verifyComputed() {
for i, c := range chars {
for _, f := range c.forms {
isNo := (f.quickCheck[MDecomposed] == QCNo)
if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
}
isMaybe := f.quickCheck[MComposed] == QCMaybe
if f.combinesBackward != isMaybe {
log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
}
if len(f.decomp) > 0 && f.combinesForward && isMaybe {
log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
}
if len(f.expandedDecomp) != 0 {
continue
}
if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
// We accept these runes to be treated differently (it only affects
// segment breaking in iteration, most likely on improper use), but
// reconsider if more characters are added.
// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
log.Fatalf("%U: nLead was %v; want %v", i, a, b)
}
}
}
nfc := c.forms[FCanonical]
nfkc := c.forms[FCompatibility]
if nfc.combinesBackward != nfkc.combinesBackward {
log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
}
}
}
// Use values in DerivedNormalizationProps.txt to compare against the
// values we computed.
// DerivedNormalizationProps.txt has form:
// 00C0..00C5 ; NFD_QC; N # ...
// 0374 ; NFD_QC; N # ...
// See https://unicode.org/reports/tr44/ for full explanation
func testDerived() {
f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(0)
c := &chars[r]
var ftype, mode int
qt := p.String(1)
switch qt {
case "NFC_QC":
ftype, mode = FCanonical, MComposed
case "NFD_QC":
ftype, mode = FCanonical, MDecomposed
case "NFKC_QC":
ftype, mode = FCompatibility, MComposed
case "NFKD_QC":
ftype, mode = FCompatibility, MDecomposed
default:
continue
}
var qr QCResult
switch p.String(2) {
case "Y":
qr = QCYes
case "N":
qr = QCNo
case "M":
qr = QCMaybe
default:
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
}
if got := c.forms[ftype].quickCheck[mode]; got != qr {
log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
}
c.forms[ftype].verified[mode] = true
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
// Any unspecified value must be QCYes. Verify this.
for i, c := range chars {
for j, fd := range c.forms {
for k, qr := range fd.quickCheck {
if !fd.verified[k] && qr != QCYes {
m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
log.Printf(m, i, j, k, qr, c.name)
}
}
}
}
}
var testHeader = `const (
Yes = iota
No
Maybe
)
type formData struct {
qc uint8
combinesForward bool
decomposition string
}
type runeData struct {
r rune
ccc uint8
nLead uint8
nTrail uint8
f [2]formData // 0: canonical; 1: compatibility
}
func f(qc uint8, cf bool, dec string) [2]formData {
return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
}
func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
}
var testData = []runeData{
`
func printTestdata() {
type lastInfo struct {
ccc uint8
nLead uint8
nTrail uint8
f string
}
last := lastInfo{}
w := &bytes.Buffer{}
fmt.Fprintf(w, testHeader)
for r, c := range chars {
f := c.forms[FCanonical]
qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
f = c.forms[FCompatibility]
qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
s := ""
if d == dk && qc == qck && cf == cfk {
s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
} else {
s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
}
current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
if last != current {
fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
last = current
}
}
fmt.Fprintln(w, "}")
gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
}

View File

@@ -1,117 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Trie table generator.
// Used by make*tables tools to generate a go file with trie data structures
// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
// sequence are used to lookup offsets in the index table to be used for the
// next byte. The last byte is used to index into a table with 16-bit values.
package main
import (
"fmt"
"io"
)
const maxSparseEntries = 16
type normCompacter struct {
sparseBlocks [][]uint64
sparseOffset []uint16
sparseCount int
name string
}
func mostFrequentStride(a []uint64) int {
counts := make(map[int]int)
var v int
for _, x := range a {
if stride := int(x) - v; v != 0 && stride >= 0 {
counts[stride]++
}
v = int(x)
}
var maxs, maxc int
for stride, cnt := range counts {
if cnt > maxc || (cnt == maxc && stride < maxs) {
maxs, maxc = stride, cnt
}
}
return maxs
}
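A tiny worked example of the stride detection (exampleStride is illustrative only, not part of the original file): for the block {10, 12, 14, 14, 16} the counted deltas are 2, 2, 0, 2, so the dominant stride is 2.
// exampleStride exercises mostFrequentStride on a block whose most common
// delta between consecutive values is 2.
func exampleStride() bool {
	return mostFrequentStride([]uint64{10, 12, 14, 14, 16}) == 2
}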
func countSparseEntries(a []uint64) int {
stride := mostFrequentStride(a)
var v, count int
for _, tv := range a {
if int(tv)-v != stride {
if tv != 0 {
count++
}
}
v = int(tv)
}
return count
}
func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
if n := countSparseEntries(v); n <= maxSparseEntries {
return (n+1)*4 + 2, true
}
return 0, false
}
func (c *normCompacter) Store(v []uint64) uint32 {
h := uint32(len(c.sparseOffset))
c.sparseBlocks = append(c.sparseBlocks, v)
c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
c.sparseCount += countSparseEntries(v) + 1
return h
}
func (c *normCompacter) Handler() string {
return c.name + "Sparse.lookup"
}
func (c *normCompacter) Print(w io.Writer) (retErr error) {
p := func(f string, x ...interface{}) {
if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
retErr = err
}
}
ls := len(c.sparseBlocks)
p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
ns := c.sparseCount
p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
p("var %sSparseValues = [%d]valueRange {", c.name, ns)
for i, b := range c.sparseBlocks {
p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
var v int
stride := mostFrequentStride(b)
n := countSparseEntries(b)
p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
for i, nv := range b {
if int(nv)-v != stride {
if v != 0 {
p(",hi:%#02x},", 0x80+i-1)
}
if nv != 0 {
p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
}
}
v = int(nv)
}
if v != 0 {
p(",hi:%#02x},", 0x80+len(b)-1)
}
}
p("\n}\n\n")
return
}

Some files were not shown because too many files have changed in this diff.