DBPW - Enables AutoMTLS for DB plugins (#10220)

This also temporarily disables couchbase, elasticsearch, and
mongodbatlas because the `Serve` function needs to change signatures
and those plugins are vendored in from external repos, causing problems
when building.
This commit is contained in:
Michael Golowka
2020-10-22 15:43:19 -06:00
committed by GitHub
parent 0510cdf275
commit d87657199d
221 changed files with 348 additions and 40021 deletions

View File

@@ -10,15 +10,16 @@ import (
"testing"
"time"
// mongodbatlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
"github.com/go-test/deep"
mongodbatlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/namespace"
postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/plugins/database/mongodb"
"github.com/hashicorp/vault/plugins/database/postgresql"
"github.com/hashicorp/vault/sdk/database/dbplugin"
v4 "github.com/hashicorp/vault/sdk/database/dbplugin"
v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/consts"
@@ -48,73 +49,58 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
sys := vault.TestDynamicSystemView(cores[0].Core)
vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_Postgres", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_Mongo", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_MongoAtlas", []string{}, "")
// vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "TestBackend_PluginMain_MongoAtlas", []string{}, "")
return cluster, sys
}
func TestBackend_PluginMain_Postgres(t *testing.T) {
if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" {
return
}
caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
if caPEM == "" {
t.Fatal("CA cert not passed in")
dbType, err := postgresql.New()
if err != nil {
t.Fatalf("Failed to initialize postgres: %s", err)
}
args := []string{"--ca-cert=" + caPEM}
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(args)
postgresql.Run(apiClientMeta.GetTLSConfig())
v5.Serve(dbType.(v5.Database))
}
func TestBackend_PluginMain_Mongo(t *testing.T) {
if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" {
return
}
caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
if caPEM == "" {
t.Fatal("CA cert not passed in")
}
args := []string{"--ca-cert=" + caPEM}
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(args)
err := mongodb.Run(apiClientMeta.GetTLSConfig())
dbType, err := mongodb.New()
if err != nil {
t.Fatal(err)
t.Fatalf("Failed to initialize mongodb: %s", err)
}
v5.Serve(dbType.(v5.Database))
}
func TestBackend_PluginMain_MongoAtlas(t *testing.T) {
if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
return
}
caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
if caPEM == "" {
t.Fatal("CA cert not passed in")
}
args := []string{"--ca-cert=" + caPEM}
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(args)
err := mongodbatlas.Run(apiClientMeta.GetTLSConfig())
if err != nil {
t.Fatal(err)
}
}
// func TestBackend_PluginMain_MongoAtlas(t *testing.T) {
// if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
// return
// }
//
// caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
// if caPEM == "" {
// t.Fatal("CA cert not passed in")
// }
//
// args := []string{"--ca-cert=" + caPEM}
//
// apiClientMeta := &api.PluginAPIClientMeta{}
// flags := apiClientMeta.FlagSet()
// flags.Parse(args)
//
// err := mongodbatlas.Run(apiClientMeta.GetTLSConfig())
// if err != nil {
// t.Fatal(err)
// }
// }
func TestBackend_RoleUpgrade(t *testing.T) {
@@ -122,14 +108,14 @@ func TestBackend_RoleUpgrade(t *testing.T) {
backend := &databaseBackend{}
roleExpected := &roleEntry{
Statements: dbplugin.Statements{
Statements: v4.Statements{
CreationStatements: "test",
Creation: []string{"test"},
},
}
entry, err := logical.StorageEntryJSON("role/test", &roleEntry{
Statements: dbplugin.Statements{
Statements: v4.Statements{
CreationStatements: "test",
},
})
@@ -858,14 +844,14 @@ func TestBackend_roleCrud(t *testing.T) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
expected := dbplugin.Statements{
expected := v4.Statements{
Creation: []string{strings.TrimSpace(testRole)},
Revocation: []string{strings.TrimSpace(defaultRevocationSQL)},
Rollback: []string{},
Renewal: []string{},
}
actual := dbplugin.Statements{
actual := v4.Statements{
Creation: resp.Data["creation_statements"].([]string),
Revocation: resp.Data["revocation_statements"].([]string),
Rollback: resp.Data["rollback_statements"].([]string),
@@ -917,14 +903,14 @@ func TestBackend_roleCrud(t *testing.T) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
expected := dbplugin.Statements{
expected := v4.Statements{
Creation: []string{strings.TrimSpace(testRole)},
Revocation: []string{strings.TrimSpace(defaultRevocationSQL)},
Rollback: []string{},
Renewal: []string{},
}
actual := dbplugin.Statements{
actual := v4.Statements{
Creation: resp.Data["creation_statements"].([]string),
Revocation: resp.Data["revocation_statements"].([]string),
Rollback: resp.Data["rollback_statements"].([]string),
@@ -980,14 +966,14 @@ func TestBackend_roleCrud(t *testing.T) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
expected := dbplugin.Statements{
expected := v4.Statements{
Creation: []string{strings.TrimSpace(testRole), strings.TrimSpace(testRole)},
Rollback: []string{strings.TrimSpace(testRole)},
Revocation: []string{strings.TrimSpace(defaultRevocationSQL), strings.TrimSpace(defaultRevocationSQL)},
Renewal: []string{strings.TrimSpace(defaultRevocationSQL)},
}
actual := dbplugin.Statements{
actual := v4.Statements{
Creation: resp.Data["creation_statements"].([]string),
Revocation: resp.Data["revocation_statements"].([]string),
Rollback: resp.Data["rollback_statements"].([]string),

View File

@@ -6,7 +6,6 @@ import (
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
@@ -26,13 +25,13 @@ func New() (interface{}, error) {
}
// Run instantiates a MongoDB object, and runs the RPC server for the plugin
func RunV5(apiTLSConfig *api.TLSConfig) error {
func RunV5() error {
dbType, err := New()
if err != nil {
return err
}
v5.Serve(dbType.(v5.Database), api.VaultPluginTLSProvider(apiTLSConfig))
v5.Serve(dbType.(v5.Database))
return nil
}

View File

@@ -250,22 +250,11 @@ func TestBackend_PluginMain_MockV4(t *testing.T) {
}
func TestBackend_PluginMain_MockV5(t *testing.T) {
if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" {
return
}
caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
if caPEM == "" {
t.Fatal("CA cert not passed in")
}
args := []string{"--ca-cert=" + caPEM}
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(args)
RunV5(apiClientMeta.GetTLSConfig())
RunV5()
}
func assertNoRespData(t *testing.T, resp *logical.Response) {

View File

@@ -353,8 +353,8 @@ func TestPredict_Plugins(t *testing.T) {
"cert",
"cf",
"consul",
"couchbase-database-plugin",
"elasticsearch-database-plugin",
// "couchbase-database-plugin",
// "elasticsearch-database-plugin",
"gcp",
"gcpkms",
"github",
@@ -369,7 +369,7 @@ func TestPredict_Plugins(t *testing.T) {
"mongodb",
"mongodb-database-plugin",
"mongodbatlas",
"mongodbatlas-database-plugin",
// "mongodbatlas-database-plugin",
"mssql",
"mssql-database-plugin",
"mysql",

5
go.mod
View File

@@ -29,6 +29,7 @@ require (
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0
github.com/client9/misspell v0.3.4
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe // indirect
github.com/coreos/go-semver v0.2.0
github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc
github.com/docker/docker v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible
@@ -67,6 +68,7 @@ require (
github.com/hashicorp/go-sockaddr v1.0.2
github.com/hashicorp/go-syslog v1.0.0
github.com/hashicorp/go-uuid v1.0.2
github.com/hashicorp/go-version v1.2.1 // indirect
github.com/hashicorp/golang-lru v0.5.3
github.com/hashicorp/hcl v1.0.0
github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d
@@ -81,9 +83,6 @@ require (
github.com/hashicorp/vault-plugin-auth-kerberos v0.1.6
github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.1-0.20200921171209-a8c355e565cb
github.com/hashicorp/vault-plugin-auth-oci v0.5.5
github.com/hashicorp/vault-plugin-database-couchbase v0.2.0
github.com/hashicorp/vault-plugin-database-elasticsearch v0.6.0
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.2.0
github.com/hashicorp/vault-plugin-mock v0.16.1
github.com/hashicorp/vault-plugin-secrets-ad v0.7.1-0.20201009192637-c613b2a27345
github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.5

12
go.sum
View File

@@ -249,10 +249,6 @@ github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQa
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/couchbase/gocb/v2 v2.1.4 h1:HRuVhqZpVNIck3FwzTxWh5TnmGXeTmSfjhxkjeradLg=
github.com/couchbase/gocb/v2 v2.1.4/go.mod h1:lESKM6wCEajrFVSZUewYuRzNtuNtnRey5wOfcZZsH90=
github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8=
github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -566,7 +562,6 @@ github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER
github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo=
github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@@ -628,12 +623,6 @@ github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.1-0.20200921171209-a8c355
github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.1-0.20200921171209-a8c355e565cb/go.mod h1:2c/k3nsoGPKV+zpAWCiajt4e66vncEq8Li/eKLqErAc=
github.com/hashicorp/vault-plugin-auth-oci v0.5.5 h1:nIP8g+VZd2V+LY/D5omWhLSnhHuogIJx7Bz6JyLt628=
github.com/hashicorp/vault-plugin-auth-oci v0.5.5/go.mod h1:Cn5cjR279Y+snw8LTaiLTko3KGrbigRbsQPOd2D5xDw=
github.com/hashicorp/vault-plugin-database-couchbase v0.2.0 h1:9nFOONMZE9TlnTHLbqzMI5sv3gYy1YzUzwVu4fOhlJg=
github.com/hashicorp/vault-plugin-database-couchbase v0.2.0/go.mod h1:iGPlVBGC/1ufQnEybsONOw3m1KMPYe+qMjIGUfHTpzA=
github.com/hashicorp/vault-plugin-database-elasticsearch v0.6.0 h1:dUx4zRp1yML7XrXKSqgE7mCZ+UclwqLZYwe9mxdlWOc=
github.com/hashicorp/vault-plugin-database-elasticsearch v0.6.0/go.mod h1:wve4k04si3gimTmCE8zXKLd9aJdUpbLjUVSbtgyxiwM=
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.2.0 h1:ucL+TqcUBXzO/owNn0sdJdv5KIIQ8r7+tWjdXk8F5ow=
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.2.0/go.mod h1:3rYFXh3YEdUrY00r39WODXqZDLzIG1pGNho2iPa99us=
github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0=
github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM=
github.com/hashicorp/vault-plugin-secrets-ad v0.7.1-0.20201009192637-c613b2a27345 h1:1/kWUsS8mE2GUsNYTC8XjKDzVf+hrL1K5HZw5/tJJ4Q=
@@ -902,7 +891,6 @@ github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/oracle/oci-go-sdk v12.5.0+incompatible h1:pr08ECoaDKHWO9tnzJB1YqClEs7ZK1CFOez2DQocH14=
github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso=

View File

@@ -10,9 +10,10 @@ import (
credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase"
dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch"
dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
// dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase"
// dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch"
// dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
credAppId "github.com/hashicorp/vault/builtin/credential/app-id"
credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
credAws "github.com/hashicorp/vault/builtin/credential/aws"
@@ -98,16 +99,16 @@ func newRegistry() *registry {
"mysql-rds-database-plugin": dbMysql.New(true),
"mysql-legacy-database-plugin": dbMysql.New(true),
"cassandra-database-plugin": dbCass.New,
"couchbase-database-plugin": dbCouchbase.New,
"elasticsearch-database-plugin": dbElastic.New,
"hana-database-plugin": dbHana.New,
"influxdb-database-plugin": dbInflux.New,
"mongodb-database-plugin": dbMongo.New,
"mongodbatlas-database-plugin": dbMongoAtlas.New,
"mssql-database-plugin": dbMssql.New,
"postgresql-database-plugin": dbPostgres.New,
"redshift-database-plugin": dbRedshift.New(true),
"cassandra-database-plugin": dbCass.New,
// "couchbase-database-plugin": dbCouchbase.New,
// "elasticsearch-database-plugin": dbElastic.New,
"hana-database-plugin": dbHana.New,
"influxdb-database-plugin": dbInflux.New,
"mongodb-database-plugin": dbMongo.New,
// "mongodbatlas-database-plugin": dbMongoAtlas.New,
"mssql-database-plugin": dbMssql.New,
"postgresql-database-plugin": dbPostgres.New,
"redshift-database-plugin": dbRedshift.New(true),
},
logicalBackends: map[string]logical.Factory{
"ad": logicalAd.Factory,

View File

@@ -4,18 +4,26 @@ import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/cassandra"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
err := cassandra.Run(apiClientMeta.GetTLSConfig())
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a Cassandra object, and runs the RPC server for the plugin
func Run() error {
dbType, err := cassandra.New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database))
return nil
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/gocql/gocql"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/api"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
@@ -45,18 +44,6 @@ func new() *Cassandra {
}
}
// Run instantiates a Cassandra object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
// Type returns the TypeName for this backend
func (c *Cassandra) Type() (string, error) {
return cassandraTypeName, nil

View File

@@ -4,18 +4,26 @@ import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/hana"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
err := hana.Run(apiClientMeta.GetTLSConfig())
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a HANA object, and runs the RPC server for the plugin
func Run() error {
dbType, err := hana.New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database))
return nil
}

View File

@@ -6,15 +6,13 @@ import (
"fmt"
"strings"
"github.com/hashicorp/vault/api"
_ "github.com/SAP/go-hdb/driver"
"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
"github.com/hashicorp/vault/sdk/helper/dbtxn"
"github.com/hashicorp/vault/sdk/helper/strutil"
_ "github.com/SAP/go-hdb/driver"
)
const (
@@ -64,18 +62,6 @@ func (h *HANA) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (
}, nil
}
// Run instantiates a HANA object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
// Type returns the TypeName for this backend
func (h *HANA) Type() (string, error) {
return hanaTypeName, nil

View File

@@ -4,18 +4,26 @@ import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/influxdb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
err := influxdb.Run(apiClientMeta.GetTLSConfig())
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a Influxdb object, and runs the RPC server for the plugin
func Run() error {
dbType, err := influxdb.New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database))
return nil
}

View File

@@ -6,7 +6,6 @@ import (
"strings"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/api"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
@@ -45,18 +44,6 @@ func new() *Influxdb {
}
}
// Run instantiates a Influxdb object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
// Type returns the TypeName for this backend
func (i *Influxdb) Type() (string, error) {
return influxdbTypeName, nil

View File

@@ -4,18 +4,26 @@ import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/mongodb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
err := mongodb.Run(apiClientMeta.GetTLSConfig())
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a MongoDB object, and runs the RPC server for the plugin
func Run() error {
dbType, err := mongodb.New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database))
return nil
}

View File

@@ -7,7 +7,6 @@ import (
"io"
"strings"
"github.com/hashicorp/vault/api"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
@@ -45,18 +44,6 @@ func new() *MongoDB {
}
}
// Run instantiates a MongoDB object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
// Type returns the TypeName for this backend
func (m *MongoDB) Type() (string, error) {
return mongoDBTypeName, nil

View File

@@ -4,18 +4,26 @@ import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/mssql"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
err := mssql.Run(apiClientMeta.GetTLSConfig())
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a MSSQL object, and runs the RPC server for the plugin
func Run() error {
dbType, err := mssql.New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database))
return nil
}

View File

@@ -10,7 +10,6 @@ import (
_ "github.com/denisenkom/go-mssqldb"
"github.com/hashicorp/errwrap"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/api"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
@@ -45,18 +44,6 @@ func new() *MSSQL {
}
}
// Run instantiates a MSSQL object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
// Type returns the TypeName for this backend
func (m *MSSQL) Type() (string, error) {
return msSQLTypeName, nil

View File

@@ -4,18 +4,28 @@ import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/mysql"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
err := mysql.Run(apiClientMeta.GetTLSConfig())
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a MySQL object, and runs the RPC server for the plugin
func Run() error {
var f func() (interface{}, error)
f = mysql.New(false)
dbType, err := f()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database))
return nil
}

View File

@@ -4,18 +4,28 @@ import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/mysql"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
err := mysql.RunLegacy(apiClientMeta.GetTLSConfig())
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a MySQL object, and runs the RPC server for the plugin
func Run() error {
var f func() (interface{}, error)
f = mysql.New(true)
dbType, err := f()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database))
return nil
}

View File

@@ -9,7 +9,6 @@ import (
stdmysql "github.com/go-sql-driver/mysql"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/api"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
@@ -63,29 +62,6 @@ func new(legacy bool) *MySQL {
}
}
// Run instantiates a MySQL object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
return runCommon(false, apiTLSConfig)
}
// Run instantiates a MySQL object, and runs the RPC server for the plugin
func RunLegacy(apiTLSConfig *api.TLSConfig) error {
return runCommon(true, apiTLSConfig)
}
func runCommon(legacy bool, apiTLSConfig *api.TLSConfig) error {
var f func() (interface{}, error)
f = New(legacy)
dbType, err := f()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
func (m *MySQL) Type() (string, error) {
return mySQLTypeName, nil
}

View File

@@ -4,18 +4,26 @@ import (
"log"
"os"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/postgresql"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
apiClientMeta := &api.PluginAPIClientMeta{}
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
err := postgresql.Run(apiClientMeta.GetTLSConfig())
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin
func Run() error {
dbType, err := postgresql.New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database))
return nil
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/api"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
@@ -67,18 +66,6 @@ func new() *PostgreSQL {
return db
}
// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := New()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
type PostgreSQL struct {
*connutil.SQLConnectionProducer
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/plugins/database/redshift"
"github.com/hashicorp/vault/sdk/database/dbplugin"
)
func main() {
@@ -13,8 +14,20 @@ func main() {
flags := apiClientMeta.FlagSet()
flags.Parse(os.Args[1:])
if err := redshift.Run(apiClientMeta.GetTLSConfig()); err != nil {
if err := Run(apiClientMeta.GetTLSConfig()); err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a RedShift object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := redshift.New(true)()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}

View File

@@ -10,7 +10,6 @@ import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/database/dbplugin"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/hashicorp/vault/sdk/database/helper/credsutil"
@@ -68,18 +67,6 @@ func newRedshift(lowercaseUsername bool) *RedShift {
return db
}
// Run instantiates a RedShift object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
dbType, err := New(true)()
if err != nil {
return err
}
dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig))
return nil
}
type RedShift struct {
*connutil.SQLConnectionProducer
credsutil.CredentialsProducer

View File

@@ -45,6 +45,7 @@ func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne
pluginutil.HandshakeConfig(handshakeConfig),
pluginutil.Logger(logger),
pluginutil.MetadataMode(isMetadataMode),
pluginutil.AutoMTLS(true),
)
if err != nil {
return nil, err

View File

@@ -1,7 +1,6 @@
package dbplugin
import (
"crypto/tls"
"fmt"
"github.com/hashicorp/go-plugin"
@@ -11,11 +10,11 @@ import (
// Serve is called from within a plugin and wraps the provided
// Database implementation in a databasePluginRPCServer object and starts a
// RPC server.
func Serve(db Database, tlsProvider func() (*tls.Config, error)) {
plugin.Serve(ServeConfig(db, tlsProvider))
func Serve(db Database) {
plugin.Serve(ServeConfig(db))
}
func ServeConfig(db Database, tlsProvider func() (*tls.Config, error)) *plugin.ServeConfig {
func ServeConfig(db Database) *plugin.ServeConfig {
err := pluginutil.OptionallyEnableMlock()
if err != nil {
fmt.Println(err)
@@ -35,7 +34,6 @@ func ServeConfig(db Database, tlsProvider func() (*tls.Config, error)) *plugin.S
HandshakeConfig: handshakeConfig,
VersionedPlugins: pluginSets,
GRPCServer: plugin.DefaultGRPCServer,
TLSProvider: tlsProvider,
}
return conf

View File

@@ -26,6 +26,7 @@ type runConfig struct {
hs plugin.HandshakeConfig
logger log.Logger
isMetadataMode bool
autoMTLS bool
}
func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) {
@@ -45,7 +46,7 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error
cmd.Env = append(cmd.Env, metadataEnv)
var clientTLSConfig *tls.Config
if !rc.isMetadataMode {
if !rc.autoMTLS && !rc.isMetadataMode {
// Get a CA TLS Certificate
certBytes, key, err := generateCert()
if err != nil {
@@ -85,7 +86,7 @@ func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error
plugin.ProtocolNetRPC,
plugin.ProtocolGRPC,
},
AutoMTLS: false,
AutoMTLS: rc.autoMTLS,
}
return clientConfig, nil
}
@@ -138,6 +139,12 @@ func MetadataMode(isMetadataMode bool) RunOpt {
}
}
func AutoMTLS(autoMTLS bool) RunOpt {
return func(rc *runConfig) {
rc.autoMTLS = autoMTLS
}
}
func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.Client, error) {
rc := runConfig{
command: r.Command,

View File

@@ -16,7 +16,7 @@ import (
"github.com/stretchr/testify/mock"
)
func TestNameMakeConfig(t *testing.T) {
func TestMakeConfig(t *testing.T) {
type testCase struct {
rc runConfig
@@ -50,6 +50,7 @@ func TestNameMakeConfig(t *testing.T) {
},
logger: hclog.NewNullLogger(),
isMetadataMode: true,
autoMTLS: false,
},
responseWrapInfoTimes: 0,
@@ -108,6 +109,7 @@ func TestNameMakeConfig(t *testing.T) {
},
logger: hclog.NewNullLogger(),
isMetadataMode: false,
autoMTLS: false,
},
responseWrapInfo: &wrapping.ResponseWrapInfo{
@@ -153,6 +155,124 @@ func TestNameMakeConfig(t *testing.T) {
},
expectTLSConfig: true,
},
"metadata mode, AutoMTLS": {
rc: runConfig{
command: "echo",
args: []string{"foo", "bar"},
sha256: []byte("some_sha256"),
env: []string{"initial=true"},
pluginSets: map[int]plugin.PluginSet{
1: plugin.PluginSet{
"bogus": nil,
},
},
hs: plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "magic_cookie_key",
MagicCookieValue: "magic_cookie_value",
},
logger: hclog.NewNullLogger(),
isMetadataMode: true,
autoMTLS: true,
},
responseWrapInfoTimes: 0,
mlockEnabled: false,
mlockEnabledTimes: 1,
expectedConfig: &plugin.ClientConfig{
HandshakeConfig: plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "magic_cookie_key",
MagicCookieValue: "magic_cookie_value",
},
VersionedPlugins: map[int]plugin.PluginSet{
1: plugin.PluginSet{
"bogus": nil,
},
},
Cmd: commandWithEnv(
"echo",
[]string{"foo", "bar"},
[]string{
"initial=true",
fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version),
fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true),
},
),
SecureConfig: &plugin.SecureConfig{
Checksum: []byte("some_sha256"),
// Hash is generated
},
AllowedProtocols: []plugin.Protocol{
plugin.ProtocolNetRPC,
plugin.ProtocolGRPC,
},
Logger: hclog.NewNullLogger(),
AutoMTLS: true,
},
expectTLSConfig: false,
},
"not-metadata mode, AutoMTLS": {
rc: runConfig{
command: "echo",
args: []string{"foo", "bar"},
sha256: []byte("some_sha256"),
env: []string{"initial=true"},
pluginSets: map[int]plugin.PluginSet{
1: plugin.PluginSet{
"bogus": nil,
},
},
hs: plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "magic_cookie_key",
MagicCookieValue: "magic_cookie_value",
},
logger: hclog.NewNullLogger(),
isMetadataMode: false,
autoMTLS: true,
},
responseWrapInfoTimes: 0,
mlockEnabled: false,
mlockEnabledTimes: 1,
expectedConfig: &plugin.ClientConfig{
HandshakeConfig: plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "magic_cookie_key",
MagicCookieValue: "magic_cookie_value",
},
VersionedPlugins: map[int]plugin.PluginSet{
1: plugin.PluginSet{
"bogus": nil,
},
},
Cmd: commandWithEnv(
"echo",
[]string{"foo", "bar"},
[]string{
"initial=true",
fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version),
fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false),
},
),
SecureConfig: &plugin.SecureConfig{
Checksum: []byte("some_sha256"),
// Hash is generated
},
AllowedProtocols: []plugin.Protocol{
plugin.ProtocolNetRPC,
plugin.ProtocolGRPC,
},
Logger: hclog.NewNullLogger(),
AutoMTLS: true,
},
expectTLSConfig: false,
},
}
for name, test := range tests {
@@ -174,7 +294,6 @@ func TestNameMakeConfig(t *testing.T) {
t.Fatalf("no error expected, got: %s", err)
}
// TODO: Certain fields will need to be checked for existence, not specific value
// The following fields are generated, so we just need to check for existence, not specific value
// The value must be nilled out before performing a DeepEqual check
hsh := config.SecureConfig.Hash

View File

@@ -2045,12 +2045,12 @@ func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string {
"mysql-legacy-database-plugin",
"cassandra-database-plugin",
"couchbase-database-plugin",
"elasticsearch-database-plugin",
// "couchbase-database-plugin",
// "elasticsearch-database-plugin",
"hana-database-plugin",
"influxdb-database-plugin",
"mongodb-database-plugin",
"mongodbatlas-database-plugin",
// "mongodbatlas-database-plugin",
"mssql-database-plugin",
"postgresql-database-plugin",
"redshift-database-plugin",

View File

@@ -1,3 +0,0 @@
*~
.project

View File

@@ -1,3 +0,0 @@
[submodule "testdata/sdk-testcases"]
path = testdata/sdk-testcases
url = https://github.com/couchbaselabs/sdk-testcases

View File

@@ -1,18 +0,0 @@
run:
modules-download-mode: readonly
tests: false
skip-files:
- logging.go # Logging has some utility functions that are useful to have around which get flagged up
linters:
enable:
- bodyclose
- golint
- gosec
- unconvert
linters-settings:
golint:
set-exit-status: true
min-confidence: 0.81
errcheck:
check-type-assertions: true
check-blank: true

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,39 +0,0 @@
devsetup:
go get github.com/golangci/golangci-lint/cmd/golangci-lint
go get github.com/vektra/mockery/.../
git submodule update --remote --init --recursive
test:
go test ./
fasttest:
go test -short ./
cover:
go test -coverprofile=cover.out ./
lint:
golangci-lint run -v
check: lint
go test -short -cover -race ./
bench:
go test -bench=. -run=none --disable-logger=true
updatetestcases:
git submodule update --remote --init --recursive
updatemocks:
mockery -name connectionManager -output . -testonly -inpkg
mockery -name kvProvider -output . -testonly -inpkg
mockery -name httpProvider -output . -testonly -inpkg
mockery -name diagnosticsProvider -output . -testonly -inpkg
mockery -name mgmtProvider -output . -testonly -inpkg
mockery -name analyticsProvider -output . -testonly -inpkg
mockery -name queryProvider -output . -testonly -inpkg
mockery -name searchProvider -output . -testonly -inpkg
mockery -name viewProvider -output . -testonly -inpkg
mockery -name waitUntilReadyProvider -output . -testonly -inpkg
# pendingOp is manually mocked
.PHONY: all test devsetup fasttest lint cover check bench updatetestcases updatemocks

View File

@@ -1,53 +0,0 @@
[![GoDoc](https://godoc.org/github.com/couchbase/gocb?status.png)](https://godoc.org/github.com/couchbase/gocb)
# Couchbase Go Client
This is the official Couchbase Go SDK. If you are looking for our
previous unofficial prototype Go client library, please see:
[http://www.github.com/couchbase/go-couchbase](http://www.github.com/couchbase/go-couchbase).
The Go SDK library allows you to connect to a Couchbase cluster from
Go. It is written in pure Go, and uses the included gocbcore library to
handle communicating to the cluster over the Couchbase binary
protocol.
## Useful Links
### Source
The project source is hosted at [http://github.com/couchbase/gocb](http://github.com/couchbase/gocb).
### Documentation
You can explore our API reference through godoc at [https://godoc.org/github.com/couchbase/gocb](https://godoc.org/github.com/couchbase/gocb).
You can also find documentation for the Go SDK at the Couchbase [Developer Portal](https://developer.couchbase.com/documentation/server/current/sdk/go/start-using-sdk.html).
### Bug Tracker
Issues are tracked on Couchbase's public [issues.couchbase.com](http://www.couchbase.com/issues/browse/GOCBC).
Contact [the site admins](https://issues.couchbase.com/secure/ContactAdministrators!default.jspa)
regarding login or other problems at issues.couchbase.com (officially) or ask
around in [couchbase/discuss on gitter.im](https://gitter.im/couchbase/discuss)
(unofficially).
## Installing
To install the latest stable version, run:
```bash
go get github.com/couchbase/gocb/v2
```
To install the latest developer version, run:
```bash
go get github.com/couchbase/gocb
```
## License
Copyright 2016 Couchbase Inc.
Licensed under the Apache License, Version 2.0.
See
[LICENSE](https://github.com/couchbase/gocb/blob/master/LICENSE)
for further details.

View File

@@ -1,89 +0,0 @@
package gocb
import (
"strings"
"time"
"github.com/google/uuid"
)
// AnalyticsScanConsistency indicates the level of data consistency desired for an analytics query.
type AnalyticsScanConsistency uint

const (
	// AnalyticsScanConsistencyNotBounded indicates no data consistency is required.
	AnalyticsScanConsistencyNotBounded AnalyticsScanConsistency = iota + 1

	// AnalyticsScanConsistencyRequestPlus indicates that request-level data consistency is required.
	AnalyticsScanConsistencyRequestPlus
)

// AnalyticsOptions is the set of options available to an Analytics query.
type AnalyticsOptions struct {
	// ClientContextID provides a unique ID for this query which can be used matching up requests between connectionManager and
	// server. If not provided will be assigned a uuid value.
	ClientContextID string

	// Priority sets whether this query should be assigned as high priority by the analytics engine.
	Priority bool

	// PositionalParameters are sent as the "args" field of the request body.
	// Mutually exclusive with NamedParameters (toMap rejects both being set).
	PositionalParameters []interface{}

	// NamedParameters are sent as "$name" fields of the request body; a "$"
	// prefix is added when missing. Mutually exclusive with PositionalParameters.
	NamedParameters map[string]interface{}

	// Readonly, when true, sets "readonly" in the request body.
	Readonly bool

	// ScanConsistency selects the consistency level; the zero value omits the
	// "scan_consistency" field from the request entirely.
	ScanConsistency AnalyticsScanConsistency

	// Raw provides a way to provide extra parameters in the request body for the query.
	Raw map[string]interface{}

	// Timeout and RetryStrategy are not referenced by toMap — presumably
	// consumed by the request dispatcher; TODO confirm against the caller.
	Timeout       time.Duration
	RetryStrategy RetryStrategy

	parentSpan requestSpanContext
}
// toMap renders the options into the key/value form used as the analytics
// request body. It returns an error when the ScanConsistency value is
// unrecognized or when both positional and named parameters are supplied.
func (opts *AnalyticsOptions) toMap() (map[string]interface{}, error) {
	body := make(map[string]interface{})

	// Every request carries a client context ID; generate one when absent.
	contextID := opts.ClientContextID
	if contextID == "" {
		contextID = uuid.New().String()
	}
	body["client_context_id"] = contextID

	switch opts.ScanConsistency {
	case 0:
		// Unset: omit scan_consistency from the request.
	case AnalyticsScanConsistencyNotBounded:
		body["scan_consistency"] = "not_bounded"
	case AnalyticsScanConsistencyRequestPlus:
		body["scan_consistency"] = "request_plus"
	default:
		return nil, makeInvalidArgumentsError("unexpected consistency option")
	}

	if opts.PositionalParameters != nil && opts.NamedParameters != nil {
		return nil, makeInvalidArgumentsError("positional and named parameters must be used exclusively")
	}
	if opts.PositionalParameters != nil {
		body["args"] = opts.PositionalParameters
	}
	for name, value := range opts.NamedParameters {
		if !strings.HasPrefix(name, "$") {
			name = "$" + name
		}
		body[name] = value
	}

	if opts.Readonly {
		body["readonly"] = true
	}

	// Raw entries are copied last and may therefore override earlier keys.
	for k, v := range opts.Raw {
		body[k] = v
	}

	return body, nil
}

View File

@@ -1,36 +0,0 @@
package gocb
import (
gocbcore "github.com/couchbase/gocbcore/v9"
)
// asyncOpManager coordinates completion of a single asynchronous gocbcore
// operation: the operation's callback calls Resolve or Reject, and Wait
// blocks until one of them fires.
type asyncOpManager struct {
	// signal is buffered with capacity 1 (see newAsyncOpManager) so that
	// Resolve/Reject never block even if Wait has not been reached yet.
	signal chan struct{}

	// wasResolved records whether the operation completed via Resolve
	// (success) rather than Reject.
	wasResolved bool
}

// Reject signals that the operation completed unsuccessfully, unblocking Wait.
func (m *asyncOpManager) Reject() {
	m.signal <- struct{}{}
}

// Resolve signals that the operation completed successfully, unblocking Wait.
func (m *asyncOpManager) Resolve() {
	m.wasResolved = true
	m.signal <- struct{}{}
}

// Wait blocks until Resolve or Reject is called. If the operation could not
// even be scheduled (err is non-nil) it returns that error immediately; the
// op argument is accepted for call-site convenience but not used here.
func (m *asyncOpManager) Wait(op gocbcore.PendingOp, err error) error {
	if err != nil {
		return err
	}
	<-m.signal
	return nil
}

// newAsyncOpManager creates a manager with the buffered signal channel that
// the Resolve/Reject/Wait protocol relies on.
func newAsyncOpManager() *asyncOpManager {
	return &asyncOpManager{
		signal: make(chan struct{}, 1),
	}
}

View File

@@ -1,143 +0,0 @@
package gocb
import (
"crypto/tls"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// UserPassPair represents a username and password pair.
// VOLATILE: This API is subject to change at any time.
type UserPassPair gocbcore.UserPassPair

// AuthCredsRequest encapsulates the data for a credential request
// from the new Authenticator interface.
// VOLATILE: This API is subject to change at any time.
type AuthCredsRequest struct {
	// Service identifies which cluster service the credentials are for.
	Service  ServiceType
	Endpoint string
}

// AuthCertRequest encapsulates the data for a certificate request
// from the new Authenticator interface.
// VOLATILE: This API is subject to change at any time.
type AuthCertRequest struct {
	// Service identifies which cluster service the certificate is for.
	Service  ServiceType
	Endpoint string
}

// Authenticator provides an interface to authenticate to each service. Note that
// only authenticators implemented via the SDK are stable.
type Authenticator interface {
	// SupportsTLS reports whether this authenticator can authenticate a TLS connection.
	// VOLATILE: This API is subject to change at any time.
	SupportsTLS() bool
	// SupportsNonTLS reports whether this authenticator can authenticate a non-TLS connection.
	// VOLATILE: This API is subject to change at any time.
	SupportsNonTLS() bool
	// Certificate returns the client certificate to present, if any.
	// VOLATILE: This API is subject to change at any time.
	Certificate(req AuthCertRequest) (*tls.Certificate, error)
	// Credentials returns the username/password pairs for a service.
	// VOLATILE: This API is subject to change at any time.
	Credentials(req AuthCredsRequest) ([]UserPassPair, error)
}
// PasswordAuthenticator implements an Authenticator which uses an RBAC username and password.
type PasswordAuthenticator struct {
	Username string
	Password string
}

// SupportsTLS returns whether this authenticator can authenticate a TLS connection.
// VOLATILE: This API is subject to change at any time.
func (pa PasswordAuthenticator) SupportsTLS() bool {
	return true
}

// SupportsNonTLS returns whether this authenticator can authenticate a non-TLS connection.
// VOLATILE: This API is subject to change at any time.
func (pa PasswordAuthenticator) SupportsNonTLS() bool {
	return true
}

// Certificate returns the certificate to use when connecting to a specified server.
// Password authentication never presents a client certificate, so this is always nil.
// VOLATILE: This API is subject to change at any time.
func (pa PasswordAuthenticator) Certificate(req AuthCertRequest) (*tls.Certificate, error) {
	return nil, nil
}

// Credentials returns the credentials for a particular service; the same
// username/password pair is used for every service and endpoint.
// VOLATILE: This API is subject to change at any time.
func (pa PasswordAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) {
	pair := UserPassPair{
		Username: pa.Username,
		Password: pa.Password,
	}
	return []UserPassPair{pair}, nil
}
// CertificateAuthenticator implements an Authenticator which can be used with certificate authentication.
type CertificateAuthenticator struct {
	ClientCertificate *tls.Certificate
}

// SupportsTLS returns whether this authenticator can authenticate a TLS connection.
// VOLATILE: This API is subject to change at any time.
func (c CertificateAuthenticator) SupportsTLS() bool {
	return true
}

// SupportsNonTLS returns whether this authenticator can authenticate a non-TLS
// connection; certificate auth requires TLS, so this is always false.
// VOLATILE: This API is subject to change at any time.
func (c CertificateAuthenticator) SupportsNonTLS() bool {
	return false
}

// Certificate returns the configured client certificate for any server.
// VOLATILE: This API is subject to change at any time.
func (c CertificateAuthenticator) Certificate(req AuthCertRequest) (*tls.Certificate, error) {
	return c.ClientCertificate, nil
}

// Credentials returns an empty username/password pair, since identity is
// carried by the client certificate rather than credentials.
// VOLATILE: This API is subject to change at any time.
func (c CertificateAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) {
	var anonymous UserPassPair
	return []UserPassPair{anonymous}, nil
}
// coreAuthWrapper adapts a gocb Authenticator onto the authenticator
// interface expected by gocbcore, translating request and result types.
type coreAuthWrapper struct {
	auth Authenticator
}

// SupportsTLS delegates to the wrapped Authenticator.
func (w *coreAuthWrapper) SupportsTLS() bool {
	return w.auth.SupportsTLS()
}

// SupportsNonTLS delegates to the wrapped Authenticator.
func (w *coreAuthWrapper) SupportsNonTLS() bool {
	return w.auth.SupportsNonTLS()
}

// Certificate converts the gocbcore request into the gocb form and delegates.
func (w *coreAuthWrapper) Certificate(req gocbcore.AuthCertRequest) (*tls.Certificate, error) {
	return w.auth.Certificate(AuthCertRequest{
		Service:  ServiceType(req.Service),
		Endpoint: req.Endpoint,
	})
}

// Credentials converts the gocbcore request into the gocb form, delegates, and
// converts each returned pair back into the gocbcore type.
func (w *coreAuthWrapper) Credentials(req gocbcore.AuthCredsRequest) ([]gocbcore.UserPassPair, error) {
	creds, err := w.auth.Credentials(AuthCredsRequest{
		Service:  ServiceType(req.Service),
		Endpoint: req.Endpoint,
	})
	if err != nil {
		return nil, err
	}

	out := make([]gocbcore.UserPassPair, 0, len(creds))
	for _, pair := range creds {
		out = append(out, gocbcore.UserPassPair(pair))
	}
	return out, nil
}

View File

@@ -1,153 +0,0 @@
package gocb
import (
"time"
"github.com/couchbase/gocbcore/v9"
)
// Bucket represents a single bucket within a cluster.
type Bucket struct {
	bucketName string

	// The following are copied from the owning Cluster in newBucket.
	timeoutsConfig       TimeoutsConfig
	transcoder           Transcoder
	retryStrategyWrapper *retryStrategyWrapper
	tracer               requestTracer
	useServerDurations   bool
	useMutationTokens    bool

	// bootstrapError, when set via setBootstrapError, makes getKvProvider
	// and WaitUntilReady fail fast with this error.
	bootstrapError error

	// connectionManager supplies the KV and wait-until-ready providers.
	connectionManager connectionManager
}
// newBucket creates a Bucket bound to the given name, inheriting its
// configuration (timeouts, transcoder, retry strategy, tracer, feature flags
// and connection manager) from the owning Cluster.
func newBucket(c *Cluster, bucketName string) *Bucket {
	return &Bucket{
		bucketName:           bucketName,
		timeoutsConfig:       c.timeoutsConfig,
		transcoder:           c.transcoder,
		retryStrategyWrapper: c.retryStrategyWrapper,
		tracer:               c.tracer,
		useServerDurations:   c.useServerDurations,
		useMutationTokens:    c.useMutationTokens,
		connectionManager:    c.connectionManager,
	}
}

// setBootstrapError records an error encountered while bootstrapping the
// bucket; subsequent operations fail fast with this error.
func (b *Bucket) setBootstrapError(err error) {
	b.bootstrapError = err
}
// getKvProvider returns the key/value provider for this bucket, failing fast
// with any recorded bootstrap error.
func (b *Bucket) getKvProvider() (kvProvider, error) {
	if b.bootstrapError != nil {
		return nil, b.bootstrapError
	}
	return b.connectionManager.getKvProvider(b.bucketName)
}
// Name returns the name of the bucket.
func (b *Bucket) Name() string {
	return b.bucketName
}

// Scope returns an instance of a Scope.
// VOLATILE: This API is subject to change at any time.
func (b *Bucket) Scope(scopeName string) *Scope {
	return newScope(b, scopeName)
}

// DefaultScope returns an instance of the default ("_default") scope.
// VOLATILE: This API is subject to change at any time.
func (b *Bucket) DefaultScope() *Scope {
	return b.Scope("_default")
}

// Collection returns an instance of a collection from within the default scope.
// VOLATILE: This API is subject to change at any time.
func (b *Bucket) Collection(collectionName string) *Collection {
	return b.DefaultScope().Collection(collectionName)
}

// DefaultCollection returns an instance of the default collection
// (the "_default" collection within the "_default" scope).
func (b *Bucket) DefaultCollection() *Collection {
	return b.DefaultScope().Collection("_default")
}
// ViewIndexes returns a ViewIndexManager instance for managing views.
// The bucket itself acts as the management request provider.
func (b *Bucket) ViewIndexes() *ViewIndexManager {
	return &ViewIndexManager{
		mgmtProvider: b,
		bucketName:   b.Name(),
		tracer:       b.tracer,
	}
}

// Collections provides functions for managing collections.
// The bucket itself acts as the management request provider.
func (b *Bucket) Collections() *CollectionManager {
	// TODO: return error for unsupported collections
	return &CollectionManager{
		mgmtProvider: b,
		bucketName:   b.Name(),
		tracer:       b.tracer,
	}
}
// WaitUntilReady blocks until the bucket object is ready for use or the
// timeout elapses. At present this waits until memd connections have been
// established with the server and are ready to be used, then pings the
// specified services (except KeyValue) that also exist in the cluster map.
// When no services are specified, only KeyValue readiness is awaited.
// Valid service types are: ServiceTypeKeyValue, ServiceTypeManagement,
// ServiceTypeQuery, ServiceTypeSearch, ServiceTypeAnalytics, ServiceTypeViews.
func (b *Bucket) WaitUntilReady(timeout time.Duration, opts *WaitUntilReadyOptions) error {
	if opts == nil {
		opts = &WaitUntilReadyOptions{}
	}
	if b.bootstrapError != nil {
		return b.bootstrapError
	}

	provider, err := b.connectionManager.getWaitUntilReadyProvider(b.bucketName)
	if err != nil {
		return err
	}

	// Default to waiting for a fully online cluster state.
	state := opts.DesiredState
	if state == 0 {
		state = ClusterStateOnline
	}

	services := make([]gocbcore.ServiceType, 0, len(opts.ServiceTypes))
	for _, svc := range opts.ServiceTypes {
		services = append(services, gocbcore.ServiceType(svc))
	}

	return provider.WaitUntilReady(
		time.Now().Add(timeout),
		gocbcore.WaitUntilReadyOptions{
			DesiredState: gocbcore.ClusterState(state),
			ServiceTypes: services,
		},
	)
}

View File

@@ -1,389 +0,0 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"time"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/couchbase/gocbcore/v9"
)
// CollectionSpec describes the specification of a collection.
type CollectionSpec struct {
	Name      string
	ScopeName string
	// MaxExpiry, when greater than zero, is sent to the server as the
	// collection's "maxTTL" in whole seconds on creation.
	MaxExpiry time.Duration
}

// ScopeSpec describes the specification of a scope.
type ScopeSpec struct {
	Name        string
	Collections []CollectionSpec
}

// These 3 types are temporary. They are necessary for now as the server beta was released with ns_server returning
// a different jsonManifest format to what it will return in the future.
type jsonManifest struct {
	UID    uint64                       `json:"uid"`
	Scopes map[string]jsonManifestScope `json:"scopes"`
}

type jsonManifestScope struct {
	UID         uint32                            `json:"uid"`
	Collections map[string]jsonManifestCollection `json:"collections"`
}

type jsonManifestCollection struct {
	UID uint32 `json:"uid"`
}

// CollectionManager provides methods for performing collections management.
type CollectionManager struct {
	mgmtProvider mgmtProvider // executes management REST requests
	bucketName   string       // bucket whose collections are managed
	tracer       requestTracer
}
// tryParseErrorMessage reads the management response body and maps well-known
// error texts onto sentinel errors. It returns nil only when the body could
// not be read; otherwise it always returns a non-nil error describing the
// response (falling back to a generic error wrapping the raw body text).
func (cm *CollectionManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("failed to read http body: %s", err)
		return nil
	}
	errText := strings.ToLower(string(b))

	if resp.StatusCode == 404 {
		// BUG(review): the original code had two identical branches here, both
		// matching "not found"+"scope"; the second was unreachable dead code
		// and was almost certainly intended to match "collection" and return a
		// collection-not-found error. The dead branch has been removed —
		// confirm whether an ErrCollectionNotFound mapping should be added.
		if strings.Contains(errText, "not found") && strings.Contains(errText, "scope") {
			return makeGenericMgmtError(ErrScopeNotFound, req, resp)
		}
	}

	if strings.Contains(errText, "already exists") && strings.Contains(errText, "collection") {
		return makeGenericMgmtError(ErrCollectionExists, req, resp)
	} else if strings.Contains(errText, "already exists") && strings.Contains(errText, "scope") {
		return makeGenericMgmtError(ErrScopeExists, req, resp)
	}

	return makeGenericMgmtError(errors.New(errText), req, resp)
}
// GetAllScopesOptions is the set of options available to the GetAllScopes operation.
type GetAllScopesOptions struct {
	// Timeout applied to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behavior of the management request.
	RetryStrategy RetryStrategy
}
// GetAllScopes gets all scopes from the bucket, including each scope's
// collections. It understands both the current manifest format and the older
// (pre-GA) ns_server format.
func (cm *CollectionManager) GetAllScopes(opts *GetAllScopesOptions) ([]ScopeSpec, error) {
	if opts == nil {
		opts = &GetAllScopesOptions{}
	}

	span := cm.tracer.StartSpan("GetAllScopes", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections", cm.bucketName),
		Method:        "GET",
		RetryStrategy: opts.RetryStrategy,
		IsIdempotent:  true,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		// resp can be nil when the request failed outright; the original code
		// unconditionally handed it to tryParseErrorMessage, which reads
		// resp.Body and would panic. Guard first, and close the body when one
		// exists (the original leaked it on this path).
		if resp == nil {
			return nil, makeGenericMgmtError(err, &req, resp)
		}
		defer ensureBodyClosed(resp.Body)
		if colErr := cm.tryParseErrorMessage(&req, resp); colErr != nil {
			return nil, colErr
		}
		return nil, makeMgmtBadStatusError("failed to get all scopes", &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		return nil, makeMgmtBadStatusError("failed to get all scopes", &req, resp)
	}

	// Read the body once so it can be decoded against both manifest formats.
	// The original created a second json.Decoder on the same resp.Body after
	// the first decode failed, which would read from a partially-consumed
	// stream and could never see the full document.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var scopes []ScopeSpec
	var mfest gocbcore.Manifest
	if err := json.Unmarshal(body, &mfest); err == nil {
		for _, scope := range mfest.Scopes {
			var collections []CollectionSpec
			for _, col := range scope.Collections {
				collections = append(collections, CollectionSpec{
					Name:      col.Name,
					ScopeName: scope.Name,
				})
			}
			scopes = append(scopes, ScopeSpec{
				Name:        scope.Name,
				Collections: collections,
			})
		}
		return scopes, nil
	}

	// Temporary support for older server versions returning the legacy
	// ns_server manifest format.
	var oldMfest jsonManifest
	if err := json.Unmarshal(body, &oldMfest); err != nil {
		return nil, err
	}
	for scopeName, scope := range oldMfest.Scopes {
		var collections []CollectionSpec
		for colName := range scope.Collections {
			collections = append(collections, CollectionSpec{
				Name:      colName,
				ScopeName: scopeName,
			})
		}
		scopes = append(scopes, ScopeSpec{
			Name:        scopeName,
			Collections: collections,
		})
	}
	return scopes, nil
}
// CreateCollectionOptions is the set of options available to the CreateCollection operation.
type CreateCollectionOptions struct {
	// Timeout applied to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behavior of the management request.
	RetryStrategy RetryStrategy
}
// CreateCollection creates a new collection on the bucket. The spec must
// carry non-empty Name and ScopeName; a positive MaxExpiry is sent to the
// server as the collection's "maxTTL" in whole seconds.
func (cm *CollectionManager) CreateCollection(spec CollectionSpec, opts *CreateCollectionOptions) error {
	if spec.Name == "" {
		return makeInvalidArgumentsError("collection name cannot be empty")
	}
	if spec.ScopeName == "" {
		return makeInvalidArgumentsError("scope name cannot be empty")
	}
	if opts == nil {
		opts = &CreateCollectionOptions{}
	}

	span := cm.tracer.StartSpan("CreateCollection", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	posts := url.Values{}
	posts.Add("name", spec.Name)
	if spec.MaxExpiry > 0 {
		posts.Add("maxTTL", fmt.Sprintf("%d", int(spec.MaxExpiry.Seconds())))
	}

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections/%s", cm.bucketName, spec.ScopeName),
		Method:        "POST",
		Body:          []byte(posts.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		if colErr := cm.tryParseErrorMessage(&req, resp); colErr != nil {
			return colErr
		}
		return makeMgmtBadStatusError("failed to create collection", &req, resp)
	}

	// The deferred ensureBodyClosed closes the response body; the original
	// code additionally called resp.Body.Close() here, double-closing it.
	// The redundant explicit close has been removed.
	return nil
}
// DropCollectionOptions is the set of options available to the DropCollection operation.
type DropCollectionOptions struct {
	// Timeout is passed through to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for the management request.
	RetryStrategy RetryStrategy
}
// DropCollection removes a collection.
func (cm *CollectionManager) DropCollection(spec CollectionSpec, opts *DropCollectionOptions) error {
	if spec.Name == "" {
		return makeInvalidArgumentsError("collection name cannot be empty")
	}
	if spec.ScopeName == "" {
		return makeInvalidArgumentsError("scope name cannot be empty")
	}
	if opts == nil {
		opts = &DropCollectionOptions{}
	}

	span := cm.tracer.StartSpan("DropCollection", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections/%s/%s", cm.bucketName, spec.ScopeName, spec.Name),
		Method:        "DELETE",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	// The deferred close is the single owner of the body; the previous version
	// additionally called resp.Body.Close() explicitly, closing it twice.
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		colErr := cm.tryParseErrorMessage(&req, resp)
		if colErr != nil {
			return colErr
		}
		return makeMgmtBadStatusError("failed to drop collection", &req, resp)
	}
	return nil
}
// CreateScopeOptions is the set of options available to the CreateScope operation.
type CreateScopeOptions struct {
	// Timeout is passed through to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for the management request.
	RetryStrategy RetryStrategy
}
// CreateScope creates a new scope on the bucket.
func (cm *CollectionManager) CreateScope(scopeName string, opts *CreateScopeOptions) error {
	if scopeName == "" {
		return makeInvalidArgumentsError("scope name cannot be empty")
	}
	if opts == nil {
		opts = &CreateScopeOptions{}
	}

	span := cm.tracer.StartSpan("CreateScope", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	posts := url.Values{}
	posts.Add("name", scopeName)

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections", cm.bucketName),
		Method:        "POST",
		Body:          []byte(posts.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		// Wrap transport-level failures the same way the sibling
		// collection/scope methods do; previously the raw error was
		// returned unwrapped here.
		return makeGenericMgmtError(err, &req, resp)
	}
	// The deferred close is the single owner of the body; the previous version
	// additionally called resp.Body.Close() explicitly, closing it twice.
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		colErr := cm.tryParseErrorMessage(&req, resp)
		if colErr != nil {
			return colErr
		}
		return makeMgmtBadStatusError("failed to create scope", &req, resp)
	}
	return nil
}
// DropScopeOptions is the set of options available to the DropScope operation.
type DropScopeOptions struct {
	// Timeout is passed through to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for the management request.
	RetryStrategy RetryStrategy
}
// DropScope removes a scope.
func (cm *CollectionManager) DropScope(scopeName string, opts *DropScopeOptions) error {
	// Validate up front, matching CreateScope; previously an empty name was
	// sent to the server as a malformed path.
	if scopeName == "" {
		return makeInvalidArgumentsError("scope name cannot be empty")
	}
	if opts == nil {
		opts = &DropScopeOptions{}
	}

	span := cm.tracer.StartSpan("DropScope", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/collections/%s", cm.bucketName, scopeName),
		Method:        "DELETE",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := cm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	// The deferred close is the single owner of the body; the previous version
	// additionally called resp.Body.Close() explicitly, closing it twice.
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		colErr := cm.tryParseErrorMessage(&req, resp)
		if colErr != nil {
			return colErr
		}
		return makeMgmtBadStatusError("failed to drop scope", &req, resp)
	}
	return nil
}

View File

@@ -1,20 +0,0 @@
package gocb
import gocbcore "github.com/couchbase/gocbcore/v9"
// InternalBucket is used for internal functionality.
// Internal: This should never be used and is not supported.
type InternalBucket struct {
	bucket *Bucket // the bucket this internal view wraps
}
// Internal returns an InternalBucket wrapping this bucket.
// Internal: This should never be used and is not supported.
func (b *Bucket) Internal() *InternalBucket {
	ib := InternalBucket{bucket: b}
	return &ib
}
// IORouter returns the collection's internal core router.
func (ib *InternalBucket) IORouter() (*gocbcore.Agent, error) {
	// Resolve the gocbcore agent backing this bucket via the connection manager.
	return ib.bucket.connectionManager.connection(ib.bucket.Name())
}

View File

@@ -1,95 +0,0 @@
package gocb
import (
"encoding/json"
"time"
)
// EndpointPingReport represents a single entry in a ping report.
type EndpointPingReport struct {
	ID        string        // endpoint identifier
	Local     string        // local address used for the ping
	Remote    string        // remote endpoint address
	State     PingState     // outcome of pinging this endpoint
	Error     string        // error message, if the ping failed
	Namespace string        // namespace the endpoint belongs to
	Latency   time.Duration // observed round-trip latency
}

// PingResult encapsulates the details from an executed ping operation.
type PingResult struct {
	ID       string
	Services map[ServiceType][]EndpointPingReport

	// sdk identifies the SDK that produced the report; emitted by MarshalJSON.
	sdk string
}

// jsonEndpointPingReport is the wire representation of EndpointPingReport.
type jsonEndpointPingReport struct {
	ID        string `json:"id,omitempty"`
	Local     string `json:"local,omitempty"`
	Remote    string `json:"remote,omitempty"`
	State     string `json:"state,omitempty"`
	Error     string `json:"error,omitempty"`
	Namespace string `json:"namespace,omitempty"`
	LatencyUs uint64 `json:"latency_us"`
}

// jsonPingReport is the wire representation of a full ping report.
type jsonPingReport struct {
	Version  uint16                              `json:"version"`
	SDK      string                              `json:"sdk,omitempty"`
	ID       string                              `json:"id,omitempty"`
	Services map[string][]jsonEndpointPingReport `json:"services,omitempty"`
}
// MarshalJSON generates a JSON representation of this ping report.
func (report *PingResult) MarshalJSON() ([]byte, error) {
	services := make(map[string][]jsonEndpointPingReport)
	for svcType, endpoints := range report.Services {
		key := serviceTypeToString(svcType)
		entries := services[key]
		if entries == nil {
			// Preserve an (empty, non-nil) entry even for services with no
			// endpoint reports.
			entries = make([]jsonEndpointPingReport, 0)
		}
		for _, ep := range endpoints {
			entries = append(entries, jsonEndpointPingReport{
				ID:        ep.ID,
				Local:     ep.Local,
				Remote:    ep.Remote,
				State:     pingStateToString(ep.State),
				Error:     ep.Error,
				Namespace: ep.Namespace,
				LatencyUs: uint64(ep.Latency / time.Nanosecond),
			})
		}
		services[key] = entries
	}

	out := jsonPingReport{
		Version:  2,
		SDK:      report.sdk,
		ID:       report.ID,
		Services: services,
	}
	return json.Marshal(&out)
}
// PingOptions are the options available to the Ping operation.
type PingOptions struct {
	// ServiceTypes restricts the ping to the given services.
	ServiceTypes []ServiceType
	// ReportID is an optional identifier carried into the generated report.
	ReportID string
	// Timeout is the per-operation timeout.
	Timeout time.Duration
}
// Ping will ping a list of services and verify they are active and
// responding in an acceptable period of time.
func (b *Bucket) Ping(opts *PingOptions) (*PingResult, error) {
	options := opts
	if options == nil {
		options = &PingOptions{}
	}

	provider, err := b.connectionManager.getDiagnosticsProvider(b.bucketName)
	if err != nil {
		return nil, err
	}

	return ping(provider, options, b.timeoutsConfig)
}

View File

@@ -1,460 +0,0 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"time"
"github.com/pkg/errors"
)
// DesignDocumentNamespace represents which namespace a design document resides in.
type DesignDocumentNamespace uint

const (
	// DesignDocumentNamespaceProduction means that a design document resides in the production namespace.
	DesignDocumentNamespaceProduction DesignDocumentNamespace = iota

	// DesignDocumentNamespaceDevelopment means that a design document resides in the development namespace.
	DesignDocumentNamespaceDevelopment
)

// jsonView is the wire representation of a single view within a design document.
type jsonView struct {
	Map    string `json:"map,omitempty"`
	Reduce string `json:"reduce,omitempty"`
}

// jsonDesignDocument is the wire representation of a design document.
type jsonDesignDocument struct {
	Views map[string]jsonView `json:"views,omitempty"`
}

// View represents a Couchbase view within a design document.
type View struct {
	Map    string // map function source
	Reduce string // optional reduce function source
}
// fromData populates the View from its wire representation.
func (v *View) fromData(data jsonView) error {
	*v = View{Map: data.Map, Reduce: data.Reduce}
	return nil
}
// toData converts the View into its wire representation.
func (v *View) toData() (jsonView, error) {
	return jsonView{Map: v.Map, Reduce: v.Reduce}, nil
}
// DesignDocument represents a Couchbase design document containing multiple views.
type DesignDocument struct {
	Name  string
	Views map[string]View // views keyed by view name
}
// fromData populates the DesignDocument from its wire representation.
func (dd *DesignDocument) fromData(data jsonDesignDocument, name string) error {
	dd.Name = name

	views := make(map[string]View, len(data.Views))
	for viewName, viewData := range data.Views {
		var v View
		if err := v.fromData(viewData); err != nil {
			return err
		}
		views[viewName] = v
	}
	dd.Views = views
	return nil
}
// toData converts the DesignDocument into its wire representation plus its name.
func (dd *DesignDocument) toData() (jsonDesignDocument, string, error) {
	viewsData := make(map[string]jsonView, len(dd.Views))
	for viewName, v := range dd.Views {
		data, err := v.toData()
		if err != nil {
			return jsonDesignDocument{}, "", err
		}
		viewsData[viewName] = data
	}
	return jsonDesignDocument{Views: viewsData}, dd.Name, nil
}
// ViewIndexManager provides methods for performing View management.
type ViewIndexManager struct {
	mgmtProvider mgmtProvider  // executes management HTTP requests
	bucketName   string        // bucket whose design documents are managed
	tracer       requestTracer // used to create spans around each operation
}
// tryParseErrorMessage attempts to map an error response body onto a typed
// error; it returns nil when the body cannot even be read.
func (vm *ViewIndexManager) tryParseErrorMessage(req mgmtRequest, resp *mgmtResponse) error {
	body, readErr := ioutil.ReadAll(resp.Body)
	if readErr != nil {
		logDebugf("Failed to read view index manager response body: %s", readErr)
		return nil
	}

	if resp.StatusCode == 404 {
		if strings.Contains(strings.ToLower(string(body)), "not_found") {
			return makeGenericMgmtError(ErrDesignDocumentNotFound, &req, resp)
		}
		return makeGenericMgmtError(errors.New(string(body)), &req, resp)
	}

	var mgrErr bucketMgrErrorResp
	if unmarshalErr := json.Unmarshal(body, &mgrErr); unmarshalErr != nil {
		logDebugf("Failed to unmarshal error body: %s", unmarshalErr)
		return makeGenericMgmtError(errors.New(string(body)), &req, resp)
	}

	// Only the first reported error is inspected.
	var firstErr string
	for _, e := range mgrErr.Errors {
		firstErr = strings.ToLower(e)
		break
	}

	bodyErr := errors.New(firstErr)
	if strings.Contains(firstErr, "bucket with given name already exists") {
		bodyErr = ErrBucketExists
	}
	return makeGenericMgmtError(bodyErr, &req, resp)
}
// doMgmtRequest dispatches a management request via the configured provider.
func (vm *ViewIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) {
	return vm.mgmtProvider.executeMgmtRequest(req)
}
// GetDesignDocumentOptions is the set of options available to the ViewIndexManager GetDesignDocument operation.
type GetDesignDocumentOptions struct {
	// Timeout is passed through to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for the management request.
	RetryStrategy RetryStrategy
}
// ddocName normalizes a design document name for the requested namespace:
// production names have any "dev_" prefix removed, development names have it
// added.
func (vm *ViewIndexManager) ddocName(name string, namespace DesignDocumentNamespace) string {
	if namespace == DesignDocumentNamespaceProduction {
		// strings.TrimLeft treated "dev_" as a character *set* and stripped
		// any leading run of 'd', 'e', 'v' and '_' (e.g. "dev_devices" ->
		// "ices"); TrimPrefix removes exactly one "dev_" prefix and is a
		// no-op when the prefix is absent.
		return strings.TrimPrefix(name, "dev_")
	}
	if !strings.HasPrefix(name, "dev_") {
		name = "dev_" + name
	}
	return name
}
// GetDesignDocument retrieves a single design document for the given bucket.
func (vm *ViewIndexManager) GetDesignDocument(name string, namespace DesignDocumentNamespace, opts *GetDesignDocumentOptions) (*DesignDocument, error) {
	options := opts
	if options == nil {
		options = &GetDesignDocumentOptions{}
	}

	span := vm.tracer.StartSpan("GetDesignDocument", nil).SetTag("couchbase.service", "view")
	defer span.Finish()

	return vm.getDesignDocument(span.Context(), name, namespace, time.Now(), options)
}
// getDesignDocument performs the fetch for GetDesignDocument; tracectx ties
// the request into the caller's span.
// NOTE(review): startTime is currently unused by this implementation.
func (vm *ViewIndexManager) getDesignDocument(tracectx requestSpanContext, name string, namespace DesignDocumentNamespace,
	startTime time.Time, opts *GetDesignDocumentOptions) (*DesignDocument, error) {
	// Apply/strip the "dev_" prefix according to the requested namespace.
	name = vm.ddocName(name, namespace)

	req := mgmtRequest{
		Service:       ServiceTypeViews,
		Path:          fmt.Sprintf("/_design/%s", name),
		Method:        "GET",
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    tracectx,
	}
	resp, err := vm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		vwErr := vm.tryParseErrorMessage(req, resp)
		if vwErr != nil {
			return nil, vwErr
		}
		return nil, makeGenericMgmtError(errors.New("failed to get design document"), &req, resp)
	}

	var ddocData jsonDesignDocument
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&ddocData)
	if err != nil {
		return nil, err
	}

	// Report the name without any development-namespace prefix.
	ddocName := strings.TrimPrefix(name, "dev_")

	var ddoc DesignDocument
	err = ddoc.fromData(ddocData, ddocName)
	if err != nil {
		return nil, err
	}
	return &ddoc, nil
}
// GetAllDesignDocumentsOptions is the set of options available to the ViewIndexManager GetAllDesignDocuments operation.
type GetAllDesignDocumentsOptions struct {
	// Timeout is passed through to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for the management request.
	RetryStrategy RetryStrategy
}
// GetAllDesignDocuments will retrieve all design documents for the given bucket.
func (vm *ViewIndexManager) GetAllDesignDocuments(namespace DesignDocumentNamespace, opts *GetAllDesignDocumentsOptions) ([]DesignDocument, error) {
	if opts == nil {
		opts = &GetAllDesignDocumentsOptions{}
	}

	span := vm.tracer.StartSpan("GetAllDesignDocuments", nil).SetTag("couchbase.service", "view")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/ddocs", vm.bucketName),
		Method:        "GET",
		IsIdempotent:  true,
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span.Context(),
	}
	resp, err := vm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		vwErr := vm.tryParseErrorMessage(req, resp)
		if vwErr != nil {
			return nil, vwErr
		}
		return nil, makeGenericMgmtError(errors.New("failed to get design documents"), &req, resp)
	}

	// The management endpoint wraps each design document in a row envelope.
	var ddocsResp struct {
		Rows []struct {
			Doc struct {
				Meta struct {
					ID string `json:"id"`
				}
				JSON jsonDesignDocument `json:"json"`
			} `json:"doc"`
		} `json:"rows"`
	}
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&ddocsResp)
	if err != nil {
		return nil, err
	}

	ddocs := make([]DesignDocument, len(ddocsResp.Rows))
	for ddocIdx, ddocData := range ddocsResp.Rows {
		// Meta.ID is presumably of the form "_design/<name>": [8:] drops that
		// prefix (len("_design/") == 8) and TrimPrefix removes any
		// development-namespace marker.
		ddocName := strings.TrimPrefix(ddocData.Doc.Meta.ID[8:], "dev_")
		err := ddocs[ddocIdx].fromData(ddocData.Doc.JSON, ddocName)
		if err != nil {
			return nil, err
		}
	}
	return ddocs, nil
}
// UpsertDesignDocumentOptions is the set of options available to the ViewIndexManager UpsertDesignDocument operation.
type UpsertDesignDocumentOptions struct {
	// Timeout is passed through to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for the management request.
	RetryStrategy RetryStrategy
}
// UpsertDesignDocument will insert a design document to the given bucket, or update
// an existing design document with the same name.
func (vm *ViewIndexManager) UpsertDesignDocument(ddoc DesignDocument, namespace DesignDocumentNamespace, opts *UpsertDesignDocumentOptions) error {
	options := opts
	if options == nil {
		options = &UpsertDesignDocumentOptions{}
	}

	span := vm.tracer.StartSpan("UpsertDesignDocument", nil).SetTag("couchbase.service", "view")
	defer span.Finish()

	return vm.upsertDesignDocument(span.Context(), ddoc, namespace, time.Now(), options)
}
// upsertDesignDocument performs the PUT for UpsertDesignDocument.
// NOTE(review): startTime is currently unused by this implementation.
func (vm *ViewIndexManager) upsertDesignDocument(
	tracectx requestSpanContext,
	ddoc DesignDocument,
	namespace DesignDocumentNamespace,
	startTime time.Time,
	opts *UpsertDesignDocumentOptions,
) error {
	ddocData, ddocName, err := ddoc.toData()
	if err != nil {
		return err
	}

	// JSON encoding is traced as its own child span.
	espan := vm.tracer.StartSpan("encode", tracectx)
	data, err := json.Marshal(&ddocData)
	espan.Finish()
	if err != nil {
		return err
	}

	// Apply/strip the "dev_" prefix according to the requested namespace.
	ddocName = vm.ddocName(ddocName, namespace)

	req := mgmtRequest{
		Service:       ServiceTypeViews,
		Path:          fmt.Sprintf("/_design/%s", ddocName),
		Method:        "PUT",
		Body:          data,
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    tracectx,
	}
	resp, err := vm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	// The views endpoint answers 201 Created on success.
	if resp.StatusCode != 201 {
		vwErr := vm.tryParseErrorMessage(req, resp)
		if vwErr != nil {
			return vwErr
		}
		return makeGenericMgmtError(errors.New("failed to upsert design document"), &req, resp)
	}
	return nil
}
// DropDesignDocumentOptions is the set of options available to the ViewIndexManager DropDesignDocument operation.
type DropDesignDocumentOptions struct {
	// Timeout is passed through to the underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for the management request.
	RetryStrategy RetryStrategy
}
// DropDesignDocument will remove a design document from the given bucket.
func (vm *ViewIndexManager) DropDesignDocument(name string, namespace DesignDocumentNamespace, opts *DropDesignDocumentOptions) error {
	options := opts
	if options == nil {
		options = &DropDesignDocumentOptions{}
	}

	span := vm.tracer.StartSpan("DropDesignDocument", nil).SetTag("couchbase.service", "view")
	defer span.Finish()

	return vm.dropDesignDocument(span.Context(), name, namespace, time.Now(), options)
}
// dropDesignDocument performs the DELETE for DropDesignDocument.
// NOTE(review): startTime is currently unused by this implementation.
func (vm *ViewIndexManager) dropDesignDocument(tracectx requestSpanContext, name string, namespace DesignDocumentNamespace,
	startTime time.Time, opts *DropDesignDocumentOptions) error {
	// Apply/strip the "dev_" prefix according to the requested namespace.
	name = vm.ddocName(name, namespace)

	req := mgmtRequest{
		Service:       ServiceTypeViews,
		Path:          fmt.Sprintf("/_design/%s", name),
		Method:        "DELETE",
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    tracectx,
	}
	resp, err := vm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		vwErr := vm.tryParseErrorMessage(req, resp)
		if vwErr != nil {
			return vwErr
		}
		return makeGenericMgmtError(errors.New("failed to drop design document"), &req, resp)
	}
	return nil
}
// PublishDesignDocumentOptions is the set of options available to the ViewIndexManager PublishDesignDocument operation.
type PublishDesignDocumentOptions struct {
	// Timeout is applied to each underlying management request.
	Timeout time.Duration
	// RetryStrategy overrides the retry behaviour for the management requests.
	RetryStrategy RetryStrategy
}
// PublishDesignDocument publishes a design document to the given bucket by
// copying it from the development namespace into the production namespace.
func (vm *ViewIndexManager) PublishDesignDocument(name string, opts *PublishDesignDocumentOptions) error {
	startTime := time.Now()
	if opts == nil {
		opts = &PublishDesignDocumentOptions{}
	}

	span := vm.tracer.StartSpan("PublishDesignDocument", nil).
		SetTag("couchbase.service", "view")
	defer span.Finish()

	// Fetch the development version of the document...
	devdoc, err := vm.getDesignDocument(
		span.Context(),
		name,
		DesignDocumentNamespaceDevelopment,
		startTime,
		&GetDesignDocumentOptions{
			RetryStrategy: opts.RetryStrategy,
			Timeout:       opts.Timeout,
		})
	if err != nil {
		return err
	}

	// ...and write it back under the production namespace.
	err = vm.upsertDesignDocument(
		span.Context(),
		*devdoc,
		DesignDocumentNamespaceProduction,
		startTime,
		&UpsertDesignDocumentOptions{
			RetryStrategy: opts.RetryStrategy,
			Timeout:       opts.Timeout,
		})
	if err != nil {
		return err
	}

	return nil
}

View File

@@ -1,206 +0,0 @@
package gocb
import (
"encoding/json"
"net/url"
"strings"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/pkg/errors"
)
// jsonViewResponse is the wire representation of view-query metadata.
type jsonViewResponse struct {
	TotalRows uint64      `json:"total_rows,omitempty"`
	DebugInfo interface{} `json:"debug_info,omitempty"`
}

// jsonViewRow is the wire representation of a single view-query row; key and
// value are kept as raw JSON for lazy decoding.
type jsonViewRow struct {
	ID    string          `json:"id"`
	Key   json.RawMessage `json:"key"`
	Value json.RawMessage `json:"value"`
}

// ViewMetaData provides access to the meta-data properties of a view query result.
type ViewMetaData struct {
	TotalRows uint64
	Debug     interface{}
}
// fromData populates the ViewMetaData from its wire representation.
func (meta *ViewMetaData) fromData(data jsonViewResponse) error {
	*meta = ViewMetaData{TotalRows: data.TotalRows, Debug: data.DebugInfo}
	return nil
}
// ViewRow represents a single row returned from a view query.
type ViewRow struct {
	ID         string
	keyBytes   []byte // raw JSON of the row key; decoded on demand via Key
	valueBytes []byte // raw JSON of the row value; decoded on demand via Value
}

// Key unmarshals the key associated with this view row into valuePtr.
func (vr *ViewRow) Key(valuePtr interface{}) error {
	return json.Unmarshal(vr.keyBytes, valuePtr)
}

// Value unmarshals the value associated with this view row into valuePtr.
func (vr *ViewRow) Value(valuePtr interface{}) error {
	return json.Unmarshal(vr.valueBytes, valuePtr)
}

// viewRowReader abstracts the streaming source of view-query rows.
type viewRowReader interface {
	NextRow() []byte
	Err() error
	MetaData() ([]byte, error)
	Close() error
}
// ViewResult implements an iterator interface which can be used to iterate over the rows of the query results.
type ViewResult struct {
	reader     viewRowReader // streaming source of raw row bytes
	currentRow ViewRow       // row most recently read by Next
}

// newViewResult wraps a row reader in a ViewResult iterator.
func newViewResult(reader viewRowReader) *ViewResult {
	return &ViewResult{
		reader: reader,
	}
}
// Next reads the next row from the stream, returning whether one was available.
func (r *ViewResult) Next() bool {
	raw := r.reader.NextRow()
	if raw == nil {
		return false
	}

	// A row that fails to parse still advances the iterator; it is simply
	// left with zero-valued fields.
	row := ViewRow{}
	var parsed jsonViewRow
	if err := json.Unmarshal(raw, &parsed); err == nil {
		row.ID = parsed.ID
		row.keyBytes = parsed.Key
		row.valueBytes = parsed.Value
	}
	r.currentRow = row
	return true
}
// Row returns the contents of the current row.
func (r *ViewResult) Row() ViewRow {
	return r.currentRow
}

// Err returns any errors that have occurred on the stream
func (r *ViewResult) Err() error {
	return r.reader.Err()
}

// Close marks the results as closed, returning any errors that occurred during reading the results.
func (r *ViewResult) Close() error {
	return r.reader.Close()
}
// MetaData returns any meta-data that was available from this query. Note that
// the meta-data will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *ViewResult) MetaData() (*ViewMetaData, error) {
	raw, err := r.reader.MetaData()
	if err != nil {
		return nil, err
	}

	var decoded jsonViewResponse
	if err := json.Unmarshal(raw, &decoded); err != nil {
		return nil, err
	}

	meta := &ViewMetaData{}
	if err := meta.fromData(decoded); err != nil {
		return nil, err
	}
	return meta, nil
}
// ViewQuery performs a view query and returns a list of rows or an error.
func (b *Bucket) ViewQuery(designDoc string, viewName string, opts *ViewOptions) (*ViewResult, error) {
	if opts == nil {
		opts = &ViewOptions{}
	}

	span := b.tracer.StartSpan("ViewQuery", opts.parentSpan).
		SetTag("couchbase.service", "view")
	defer span.Finish()

	// Apply or strip the "dev_" prefix based on the requested namespace.
	designDoc = b.maybePrefixDevDocument(opts.Namespace, designDoc)

	// A zero timeout falls back to the bucket-level view timeout.
	timeout := opts.Timeout
	if timeout == 0 {
		timeout = b.timeoutsConfig.ViewTimeout
	}
	deadline := time.Now().Add(timeout)

	retryWrapper := b.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		retryWrapper = newRetryStrategyWrapper(opts.RetryStrategy)
	}

	urlValues, err := opts.toURLValues()
	if err != nil {
		return nil, errors.Wrap(err, "could not parse query options")
	}

	return b.execViewQuery(span.Context(), "_view", designDoc, viewName, *urlValues, deadline, retryWrapper)
}
// execViewQuery dispatches a view query to the core view provider, wrapping
// any failure in a ViewError that carries the design document and view names.
func (b *Bucket) execViewQuery(
	span requestSpanContext,
	viewType, ddoc, viewName string,
	options url.Values,
	deadline time.Time,
	wrapper *retryStrategyWrapper,
) (*ViewResult, error) {
	provider, err := b.connectionManager.getViewProvider()
	if err != nil {
		return nil, ViewError{
			InnerError:         wrapError(err, "failed to get query provider"),
			DesignDocumentName: ddoc,
			ViewName:           viewName,
		}
	}

	res, err := provider.ViewQuery(gocbcore.ViewQueryOptions{
		DesignDocumentName: ddoc,
		ViewType:           viewType,
		ViewName:           viewName,
		Options:            options,
		RetryStrategy:      wrapper,
		Deadline:           deadline,
		TraceContext:       span,
	})
	if err != nil {
		return nil, maybeEnhanceViewError(err)
	}

	return newViewResult(res), nil
}
// maybePrefixDevDocument normalizes a design document name for the requested
// namespace: production names lose any "dev_" prefix, development names gain
// one.
func (b *Bucket) maybePrefixDevDocument(namespace DesignDocumentNamespace, ddoc string) string {
	if namespace == DesignDocumentNamespaceProduction {
		return strings.TrimPrefix(ddoc, "dev_")
	}
	if strings.HasPrefix(ddoc, "dev_") {
		return ddoc
	}
	return "dev_" + ddoc
}

View File

@@ -1,18 +0,0 @@
package gocb
import "time"
// CircuitBreakerCallback is the callback used by the circuit breaker to determine if an error should count toward
// the circuit breaker failure count.
type CircuitBreakerCallback func(error) bool

// CircuitBreakerConfig are the settings for configuring circuit breakers.
type CircuitBreakerConfig struct {
	// Disabled turns circuit breaking off entirely.
	Disabled bool
	// VolumeThreshold is the minimum number of operations observed before the breaker can trip.
	VolumeThreshold int64
	// ErrorThresholdPercentage is the error rate at which the breaker trips.
	ErrorThresholdPercentage float64
	// SleepWindow is how long the breaker stays open before probing again.
	SleepWindow time.Duration
	// RollingWindow is the period over which failures are counted.
	RollingWindow time.Duration
	// CompletionCallback decides whether a given error counts as a failure.
	CompletionCallback CircuitBreakerCallback
	// CanaryTimeout is the timeout applied to the probe (canary) operation.
	CanaryTimeout time.Duration
}

View File

@@ -1,231 +0,0 @@
package gocb
import (
"crypto/x509"
"sync"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/pkg/errors"
)
// connectionManager abstracts the lifecycle of the underlying gocbcore
// connections and hands out per-service providers.
type connectionManager interface {
	connect() error
	openBucket(bucketName string) error
	buildConfig(cluster *Cluster) error
	getKvProvider(bucketName string) (kvProvider, error)
	getViewProvider() (viewProvider, error)
	getQueryProvider() (queryProvider, error)
	getAnalyticsProvider() (analyticsProvider, error)
	getSearchProvider() (searchProvider, error)
	getHTTPProvider() (httpProvider, error)
	getDiagnosticsProvider(bucketName string) (diagnosticsProvider, error)
	getWaitUntilReadyProvider(bucketName string) (waitUntilReadyProvider, error)
	connection(bucketName string) (*gocbcore.Agent, error)
	close() error
}
// stdConnectionMgr is the default connectionManager implementation, backed by
// a gocbcore.AgentGroup.
type stdConnectionMgr struct {
	lock       sync.Mutex                // guards agentgroup and config
	agentgroup *gocbcore.AgentGroup      // nil until connect succeeds
	config     *gocbcore.AgentGroupConfig // built by buildConfig
}

// newConnectionMgr creates an unconfigured connection manager; buildConfig and
// connect must be called before it can hand out providers.
func newConnectionMgr() *stdConnectionMgr {
	client := &stdConnectionMgr{}
	return client
}
// buildConfig translates the cluster-level options into a
// gocbcore.AgentGroupConfig, which connect later consumes.
func (c *stdConnectionMgr) buildConfig(cluster *Cluster) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	breakerCfg := cluster.circuitBreakerConfig

	// Wrap the user's callback so it sees enhanced (gocb-level) errors rather
	// than raw core errors.
	var completionCallback func(err error) bool
	if breakerCfg.CompletionCallback != nil {
		completionCallback = func(err error) bool {
			wrappedErr := maybeEnhanceKVErr(err, "", "", "", "")
			return breakerCfg.CompletionCallback(wrappedErr)
		}
	}

	// Prefer the internal override; otherwise derive the root CA provider
	// from the public security config (nil pool when verification is skipped).
	var tlsRootCAProvider func() *x509.CertPool
	if cluster.internalConfig.TLSRootCAProvider == nil {
		tlsRootCAProvider = func() *x509.CertPool {
			if cluster.securityConfig.TLSSkipVerify {
				return nil
			}
			return cluster.securityConfig.TLSRootCAs
		}
	} else {
		tlsRootCAProvider = cluster.internalConfig.TLSRootCAProvider
	}

	config := &gocbcore.AgentGroupConfig{
		AgentConfig: gocbcore.AgentConfig{
			UserAgent:              Identifier(),
			TLSRootCAProvider:      tlsRootCAProvider,
			ConnectTimeout:         cluster.timeoutsConfig.ConnectTimeout,
			UseMutationTokens:      cluster.useMutationTokens,
			KVConnectTimeout:       7000 * time.Millisecond,
			UseDurations:           cluster.useServerDurations,
			UseCollections:         true,
			UseZombieLogger:        cluster.orphanLoggerEnabled,
			ZombieLoggerInterval:   cluster.orphanLoggerInterval,
			ZombieLoggerSampleSize: int(cluster.orphanLoggerSampleSize),
			NoRootTraceSpans:       true,
			Tracer:                 &requestTracerWrapper{cluster.tracer},
			CircuitBreakerConfig: gocbcore.CircuitBreakerConfig{
				Enabled:                  !breakerCfg.Disabled,
				VolumeThreshold:          breakerCfg.VolumeThreshold,
				ErrorThresholdPercentage: breakerCfg.ErrorThresholdPercentage,
				SleepWindow:              breakerCfg.SleepWindow,
				RollingWindow:            breakerCfg.RollingWindow,
				CanaryTimeout:            breakerCfg.CanaryTimeout,
				CompletionCallback:       completionCallback,
			},
			DefaultRetryStrategy: cluster.retryStrategyWrapper,
		},
	}

	// Connection-string options (scheme, hosts, query params) are applied on
	// top of the explicit settings above.
	err := config.FromConnStr(cluster.connSpec().String())
	if err != nil {
		return err
	}

	config.Auth = &coreAuthWrapper{
		auth: cluster.authenticator(),
	}

	c.config = config
	return nil
}
// connect establishes the agent group from the previously built config.
func (c *stdConnectionMgr) connect() error {
	c.lock.Lock()
	defer c.lock.Unlock()

	group, err := gocbcore.CreateAgentGroup(c.config)
	c.agentgroup = group
	if err != nil {
		return maybeEnhanceKVErr(err, "", "", "", "")
	}
	return nil
}
// openBucket opens the named bucket on the underlying agent group; connect
// must have been called first.
func (c *stdConnectionMgr) openBucket(bucketName string) error {
	if c.agentgroup == nil {
		return errors.New("cluster not yet connected")
	}
	return c.agentgroup.OpenBucket(bucketName)
}
// getKvProvider returns the KV provider (the bucket's agent); the bucket must
// already have been opened.
func (c *stdConnectionMgr) getKvProvider(bucketName string) (kvProvider, error) {
	group := c.agentgroup
	if group == nil {
		return nil, errors.New("cluster not yet connected")
	}
	if agent := group.GetAgent(bucketName); agent != nil {
		return agent, nil
	}
	return nil, errors.New("bucket not yet connected")
}
// getViewProvider exposes the agent group as a view-query provider.
func (c *stdConnectionMgr) getViewProvider() (viewProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}
	return &viewProviderWrapper{provider: c.agentgroup}, nil
}

// getQueryProvider exposes the agent group as a N1QL query provider.
func (c *stdConnectionMgr) getQueryProvider() (queryProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}
	return &queryProviderWrapper{provider: c.agentgroup}, nil
}

// getAnalyticsProvider exposes the agent group as an analytics query provider.
func (c *stdConnectionMgr) getAnalyticsProvider() (analyticsProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}
	return &analyticsProviderWrapper{provider: c.agentgroup}, nil
}

// getSearchProvider exposes the agent group as a search query provider.
func (c *stdConnectionMgr) getSearchProvider() (searchProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}
	return &searchProviderWrapper{provider: c.agentgroup}, nil
}

// getHTTPProvider exposes the agent group as a generic HTTP provider.
func (c *stdConnectionMgr) getHTTPProvider() (httpProvider, error) {
	if c.agentgroup == nil {
		return nil, errors.New("cluster not yet connected")
	}
	return &httpProviderWrapper{provider: c.agentgroup}, nil
}
// getDiagnosticsProvider returns a diagnostics provider scoped either to the
// whole cluster (empty bucketName) or to a single opened bucket.
func (c *stdConnectionMgr) getDiagnosticsProvider(bucketName string) (diagnosticsProvider, error) {
	group := c.agentgroup
	if group == nil {
		return nil, errors.New("cluster not yet connected")
	}
	if bucketName == "" {
		return &diagnosticsProviderWrapper{provider: group}, nil
	}

	agent := group.GetAgent(bucketName)
	if agent == nil {
		return nil, errors.New("bucket not yet connected")
	}
	return &diagnosticsProviderWrapper{provider: agent}, nil
}
// getWaitUntilReadyProvider returns a wait-until-ready provider scoped either
// to the whole cluster (empty bucketName) or to a single opened bucket.
func (c *stdConnectionMgr) getWaitUntilReadyProvider(bucketName string) (waitUntilReadyProvider, error) {
	group := c.agentgroup
	if group == nil {
		return nil, errors.New("cluster not yet connected")
	}
	if bucketName == "" {
		return &waitUntilReadyProviderWrapper{provider: group}, nil
	}

	agent := group.GetAgent(bucketName)
	if agent == nil {
		return nil, errors.New("provider not yet connected")
	}
	return &waitUntilReadyProviderWrapper{provider: agent}, nil
}
// connection returns the raw gocbcore agent for an opened bucket.
func (c *stdConnectionMgr) connection(bucketName string) (*gocbcore.Agent, error) {
	group := c.agentgroup
	if group == nil {
		return nil, errors.New("cluster not yet connected")
	}
	if agent := group.GetAgent(bucketName); agent != nil {
		return agent, nil
	}
	return nil, errors.New("bucket not yet connected")
}
// close shuts down the underlying agent group.
func (c *stdConnectionMgr) close() error {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.agentgroup == nil {
		return errors.New("cluster not yet connected")
	}
	return c.agentgroup.Close()
}

View File

@@ -1,474 +0,0 @@
package gocb
import (
"crypto/x509"
"fmt"
"strconv"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
gocbconnstr "github.com/couchbase/gocbcore/v9/connstr"
"github.com/pkg/errors"
)
// Cluster represents a connection to a specific Couchbase cluster.
type Cluster struct {
	cSpec             gocbconnstr.ConnSpec // parsed connection string
	auth              Authenticator
	connectionManager connectionManager // owns the underlying gocbcore connections

	useServerDurations bool
	useMutationTokens  bool

	timeoutsConfig TimeoutsConfig

	transcoder           Transcoder
	retryStrategyWrapper *retryStrategyWrapper

	orphanLoggerEnabled    bool
	orphanLoggerInterval   time.Duration
	orphanLoggerSampleSize uint32

	tracer requestTracer

	circuitBreakerConfig CircuitBreakerConfig
	securityConfig       SecurityConfig
	internalConfig       InternalConfig
}

// IoConfig specifies IO related configuration options.
type IoConfig struct {
	DisableMutationTokens  bool
	DisableServerDurations bool
}

// TimeoutsConfig specifies options for various operation timeouts.
type TimeoutsConfig struct {
	ConnectTimeout time.Duration
	KVTimeout      time.Duration
	// Volatile: This option is subject to change at any time.
	KVDurableTimeout  time.Duration
	ViewTimeout       time.Duration
	QueryTimeout      time.Duration
	AnalyticsTimeout  time.Duration
	SearchTimeout     time.Duration
	ManagementTimeout time.Duration
}

// OrphanReporterConfig specifies options for controlling the orphan
// reporter which records when the SDK receives responses for requests
// that are no longer in the system (usually due to being timed out).
type OrphanReporterConfig struct {
	Disabled       bool
	ReportInterval time.Duration
	SampleSize     uint32
}

// SecurityConfig specifies options for controlling security related
// items such as TLS root certificates and verification skipping.
type SecurityConfig struct {
	TLSRootCAs    *x509.CertPool
	TLSSkipVerify bool
}

// InternalConfig specifies options for controlling various internal
// items.
// Internal: This should never be used and is not supported.
type InternalConfig struct {
	TLSRootCAProvider func() *x509.CertPool
}
// ClusterOptions is the set of options available for creating a Cluster.
type ClusterOptions struct {
// Authenticator specifies the authenticator to use with the cluster.
Authenticator Authenticator
// Username & Password specifies the cluster username and password to
// authenticate with. This is equivalent to passing PasswordAuthenticator
// as the Authenticator parameter with the same values.
Username string
Password string
// Timeouts specifies various operation timeouts.
TimeoutsConfig TimeoutsConfig
// Transcoder is used for trancoding data used in KV operations.
Transcoder Transcoder
// RetryStrategy is used to automatically retry operations if they fail.
RetryStrategy RetryStrategy
// Tracer specifies the tracer to use for requests.
// VOLATILE: This API is subject to change at any time.
Tracer requestTracer
// OrphanReporterConfig specifies options for the orphan reporter.
OrphanReporterConfig OrphanReporterConfig
// CircuitBreakerConfig specifies options for the circuit breakers.
CircuitBreakerConfig CircuitBreakerConfig
// IoConfig specifies IO related configuration options.
IoConfig IoConfig
// SecurityConfig specifies security related configuration options.
SecurityConfig SecurityConfig
// Internal: This should never be used and is not supported.
InternalConfig InternalConfig
}
// ClusterCloseOptions is the set of options available when
// disconnecting from a Cluster.
type ClusterCloseOptions struct {
}
// clusterFromOptions constructs a Cluster from the supplied options,
// substituting SDK defaults for any authenticator, timeout, transcoder,
// retry strategy or tracer the caller did not provide. It does not open
// any connections; Connect does that.
func clusterFromOptions(opts ClusterOptions) *Cluster {
	if opts.Authenticator == nil {
		opts.Authenticator = PasswordAuthenticator{
			Username: opts.Username,
			Password: opts.Password,
		}
	}

	// orDefault applies the SDK default when the user left a timeout unset
	// (zero or negative); this replaces eight copy-pasted if-blocks.
	orDefault := func(v, def time.Duration) time.Duration {
		if v > 0 {
			return v
		}
		return def
	}
	connectTimeout := orDefault(opts.TimeoutsConfig.ConnectTimeout, 10000*time.Millisecond)
	kvTimeout := orDefault(opts.TimeoutsConfig.KVTimeout, 2500*time.Millisecond)
	kvDurableTimeout := orDefault(opts.TimeoutsConfig.KVDurableTimeout, 10000*time.Millisecond)
	viewTimeout := orDefault(opts.TimeoutsConfig.ViewTimeout, 75000*time.Millisecond)
	queryTimeout := orDefault(opts.TimeoutsConfig.QueryTimeout, 75000*time.Millisecond)
	analyticsTimeout := orDefault(opts.TimeoutsConfig.AnalyticsTimeout, 75000*time.Millisecond)
	searchTimeout := orDefault(opts.TimeoutsConfig.SearchTimeout, 75000*time.Millisecond)
	managementTimeout := orDefault(opts.TimeoutsConfig.ManagementTimeout, 75000*time.Millisecond)

	if opts.Transcoder == nil {
		opts.Transcoder = NewJSONTranscoder()
	}
	if opts.RetryStrategy == nil {
		opts.RetryStrategy = NewBestEffortRetryStrategy(nil)
	}

	var initialTracer requestTracer
	if opts.Tracer != nil {
		initialTracer = opts.Tracer
	} else {
		initialTracer = newThresholdLoggingTracer(nil)
	}
	// Take a reference so the tracer survives until Cluster.Close releases it.
	tracerAddRef(initialTracer)

	return &Cluster{
		auth: opts.Authenticator,
		timeoutsConfig: TimeoutsConfig{
			ConnectTimeout:    connectTimeout,
			QueryTimeout:      queryTimeout,
			AnalyticsTimeout:  analyticsTimeout,
			SearchTimeout:     searchTimeout,
			ViewTimeout:       viewTimeout,
			KVTimeout:         kvTimeout,
			KVDurableTimeout:  kvDurableTimeout,
			ManagementTimeout: managementTimeout,
		},
		transcoder:             opts.Transcoder,
		useMutationTokens:      !opts.IoConfig.DisableMutationTokens,
		retryStrategyWrapper:   newRetryStrategyWrapper(opts.RetryStrategy),
		orphanLoggerEnabled:    !opts.OrphanReporterConfig.Disabled,
		orphanLoggerInterval:   opts.OrphanReporterConfig.ReportInterval,
		orphanLoggerSampleSize: opts.OrphanReporterConfig.SampleSize,
		useServerDurations:     !opts.IoConfig.DisableServerDurations,
		tracer:                 initialTracer,
		circuitBreakerConfig:   opts.CircuitBreakerConfig,
		securityConfig:         opts.SecurityConfig,
		internalConfig:         opts.InternalConfig,
	}
}
// Connect creates and returns a Cluster instance created using the
// provided options and a connection string.
//
// The connection string is parsed first so that parse errors are
// reported before any cluster state is built; the http scheme is
// rejected outright (only couchbase/couchbases are supported here).
func Connect(connStr string, opts ClusterOptions) (*Cluster, error) {
	connSpec, err := gocbconnstr.Parse(connStr)
	if err != nil {
		return nil, err
	}

	if connSpec.Scheme == "http" {
		return nil, errors.New("http scheme is not supported, use couchbase or couchbases instead")
	}

	cluster := clusterFromOptions(opts)
	cluster.cSpec = connSpec

	// Connection-string options (e.g. query_timeout) override the values
	// chosen by clusterFromOptions.
	err = cluster.parseExtraConnStrOptions(connSpec)
	if err != nil {
		return nil, err
	}

	cli := newConnectionMgr()
	err = cli.buildConfig(cluster)
	if err != nil {
		return nil, err
	}

	err = cli.connect()
	if err != nil {
		return nil, err
	}
	// Only attach the manager once it has connected successfully.
	cluster.connectionManager = cli

	return cluster, nil
}
// parseExtraConnStrOptions applies timeout overrides carried in the
// connection string (e.g. "?query_timeout=5000") on top of the cluster's
// configured timeouts. All supported options are integer millisecond
// values; a non-numeric value is reported as an error.
func (c *Cluster) parseExtraConnStrOptions(spec gocbconnstr.ConnSpec) error {
	// fetchOption returns the last value supplied for an option,
	// mirroring "last one wins" semantics for repeated options.
	fetchOption := func(name string) (string, bool) {
		optValue := spec.Options[name]
		if len(optValue) == 0 {
			return "", false
		}
		return optValue[len(optValue)-1], true
	}

	// Table-driven parse replaces four identical hand-written branches.
	timeoutOpts := []struct {
		name string
		dst  *time.Duration
	}{
		{"query_timeout", &c.timeoutsConfig.QueryTimeout},
		{"analytics_timeout", &c.timeoutsConfig.AnalyticsTimeout},
		{"search_timeout", &c.timeoutsConfig.SearchTimeout},
		{"view_timeout", &c.timeoutsConfig.ViewTimeout},
	}
	for _, opt := range timeoutOpts {
		valStr, ok := fetchOption(opt.name)
		if !ok {
			continue
		}
		val, err := strconv.ParseInt(valStr, 10, 64)
		if err != nil {
			return fmt.Errorf("%s option must be a number", opt.name)
		}
		*opt.dst = time.Duration(val) * time.Millisecond
	}

	return nil
}
// Bucket connects the cluster to server(s) and returns a new Bucket instance.
//
// Note that opening the bucket never fails synchronously: any error from
// openBucket is recorded on the returned Bucket as a bootstrap error and
// surfaces on subsequent operations.
func (c *Cluster) Bucket(bucketName string) *Bucket {
	b := newBucket(c, bucketName)
	err := c.connectionManager.openBucket(bucketName)
	if err != nil {
		b.setBootstrapError(err)
	}

	return b
}

// authenticator returns the Authenticator this cluster was configured with.
func (c *Cluster) authenticator() Authenticator {
	return c.auth
}

// connSpec returns the parsed connection string for this cluster.
func (c *Cluster) connSpec() gocbconnstr.ConnSpec {
	return c.cSpec
}
// WaitUntilReadyOptions is the set of options available to the WaitUntilReady operations.
type WaitUntilReadyOptions struct {
	DesiredState ClusterState  // zero value defaults to ClusterStateOnline
	ServiceTypes []ServiceType // nil/empty pings the default management/query/search/analytics set
}

// WaitUntilReady will wait for the cluster object to be ready for use.
// At present this will wait until memd connections have been established with the server and are ready
// to be used before performing a ping against the specified services which also
// exist in the cluster map.
// If no services are specified then ServiceTypeManagement, ServiceTypeQuery, ServiceTypeSearch, ServiceTypeAnalytics
// will be pinged.
// Valid service types are: ServiceTypeManagement, ServiceTypeQuery, ServiceTypeSearch, ServiceTypeAnalytics.
func (c *Cluster) WaitUntilReady(timeout time.Duration, opts *WaitUntilReadyOptions) error {
	if opts == nil {
		opts = &WaitUntilReadyOptions{}
	}

	cli := c.connectionManager
	if cli == nil {
		return errors.New("cluster is not connected")
	}

	// Empty bucket name requests the cluster-level provider.
	provider, err := cli.getWaitUntilReadyProvider("")
	if err != nil {
		return err
	}

	desiredState := opts.DesiredState
	if desiredState == 0 {
		desiredState = ClusterStateOnline
	}

	// Translate SDK service types into their gocbcore equivalents.
	services := opts.ServiceTypes
	gocbcoreServices := make([]gocbcore.ServiceType, len(services))
	for i, svc := range services {
		gocbcoreServices[i] = gocbcore.ServiceType(svc)
	}

	err = provider.WaitUntilReady(
		time.Now().Add(timeout),
		gocbcore.WaitUntilReadyOptions{
			DesiredState: gocbcore.ClusterState(desiredState),
			ServiceTypes: gocbcoreServices,
		},
	)
	if err != nil {
		return err
	}

	return nil
}
// Close shuts down all buckets in this cluster and invalidates any references this cluster has.
//
// A failure to close the connection manager is logged and returned, but
// the tracer reference is still released so it is not leaked.
func (c *Cluster) Close(opts *ClusterCloseOptions) error {
	var overallErr error

	if c.connectionManager != nil {
		err := c.connectionManager.close()
		if err != nil {
			logWarnf("Failed to close cluster connectionManager in cluster close: %s", err)
			overallErr = err
		}
	}

	// Release the reference taken in clusterFromOptions and drop the
	// tracer so Close is idempotent with respect to tracer refcounting.
	if c.tracer != nil {
		tracerDecRef(c.tracer)
		c.tracer = nil
	}

	return overallErr
}
// getDiagnosticsProvider returns the diagnostics provider for the
// cluster-level (bucketless) connection.
func (c *Cluster) getDiagnosticsProvider() (diagnosticsProvider, error) {
	return c.connectionManager.getDiagnosticsProvider("")
}

// getQueryProvider returns the query provider for this cluster.
func (c *Cluster) getQueryProvider() (queryProvider, error) {
	return c.connectionManager.getQueryProvider()
}

// getAnalyticsProvider returns the analytics provider for this cluster.
func (c *Cluster) getAnalyticsProvider() (analyticsProvider, error) {
	return c.connectionManager.getAnalyticsProvider()
}

// getSearchProvider returns the search provider for this cluster.
func (c *Cluster) getSearchProvider() (searchProvider, error) {
	return c.connectionManager.getSearchProvider()
}

// getHTTPProvider returns the raw HTTP provider for this cluster.
func (c *Cluster) getHTTPProvider() (httpProvider, error) {
	return c.connectionManager.getHTTPProvider()
}
// Users returns a UserManager for managing users.
func (c *Cluster) Users() *UserManager {
	return &UserManager{
		provider: c,
		tracer:   c.tracer,
	}
}

// Buckets returns a BucketManager for managing buckets.
func (c *Cluster) Buckets() *BucketManager {
	return &BucketManager{
		provider: c,
		tracer:   c.tracer,
	}
}

// AnalyticsIndexes returns an AnalyticsIndexManager for managing analytics indexes.
// The cluster's management timeout is used as the manager's default timeout.
func (c *Cluster) AnalyticsIndexes() *AnalyticsIndexManager {
	return &AnalyticsIndexManager{
		aProvider:     c,
		mgmtProvider:  c,
		globalTimeout: c.timeoutsConfig.ManagementTimeout,
		tracer:        c.tracer,
	}
}

// QueryIndexes returns a QueryIndexManager for managing query indexes.
func (c *Cluster) QueryIndexes() *QueryIndexManager {
	return &QueryIndexManager{
		provider:      c,
		globalTimeout: c.timeoutsConfig.ManagementTimeout,
		tracer:        c.tracer,
	}
}

// SearchIndexes returns a SearchIndexManager for managing search indexes.
func (c *Cluster) SearchIndexes() *SearchIndexManager {
	return &SearchIndexManager{
		mgmtProvider: c,
		tracer:       c.tracer,
	}
}

View File

@@ -1,597 +0,0 @@
package gocb
import (
"encoding/json"
"fmt"
"strings"
"time"
)
// AnalyticsIndexManager provides methods for performing Couchbase Analytics index management.
type AnalyticsIndexManager struct {
	aProvider     analyticsIndexQueryProvider // executes analytics statements
	mgmtProvider  mgmtProvider                // executes raw management HTTP requests
	globalTimeout time.Duration               // default timeout when an operation supplies none
	tracer        requestTracer
}

// analyticsIndexQueryProvider is the subset of Cluster the manager needs
// to run analytics statements.
type analyticsIndexQueryProvider interface {
	AnalyticsQuery(statement string, opts *AnalyticsOptions) (*AnalyticsResult, error)
}
// doAnalyticsQuery runs an analytics statement, applying the manager's
// global timeout when the caller supplied none, and collects every result
// row as raw JSON. Rows that fail to read are logged and skipped rather
// than aborting the whole operation; stream-level errors are returned.
func (am *AnalyticsIndexManager) doAnalyticsQuery(q string, opts *AnalyticsOptions) ([][]byte, error) {
	if opts.Timeout == 0 {
		opts.Timeout = am.globalTimeout
	}

	result, err := am.aProvider.AnalyticsQuery(q, opts)
	if err != nil {
		return nil, err
	}

	var rows [][]byte
	for result.Next() {
		var row json.RawMessage
		err := result.Row(&row)
		if err != nil {
			logWarnf("management operation failed to read row: %s", err)
		} else {
			rows = append(rows, row)
		}
	}
	// Check for any error that occurred while streaming the rows.
	err = result.Err()
	if err != nil {
		return nil, err
	}

	return rows, nil
}
// doMgmtRequest forwards a management request to the configured provider.
func (am *AnalyticsIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) {
	return am.mgmtProvider.executeMgmtRequest(req)
}
// jsonAnalyticsDataset mirrors the wire representation of a dataset row
// from the Metadata.`Dataset` collection.
type jsonAnalyticsDataset struct {
	DatasetName   string `json:"DatasetName"`
	DataverseName string `json:"DataverseName"`
	LinkName      string `json:"LinkName"`
	BucketName    string `json:"BucketName"`
}

// jsonAnalyticsIndex mirrors the wire representation of an index row
// from the Metadata.`Index` collection.
type jsonAnalyticsIndex struct {
	IndexName     string `json:"IndexName"`
	DatasetName   string `json:"DatasetName"`
	DataverseName string `json:"DataverseName"`
	IsPrimary     bool   `json:"IsPrimary"`
}

// AnalyticsDataset contains information about an analytics dataset.
type AnalyticsDataset struct {
	Name          string
	DataverseName string
	LinkName      string
	BucketName    string
}

// fromData populates the dataset from its JSON wire form. It never fails;
// the error return exists for interface symmetry with other fromData methods.
func (ad *AnalyticsDataset) fromData(data jsonAnalyticsDataset) error {
	ad.Name = data.DatasetName
	ad.DataverseName = data.DataverseName
	ad.LinkName = data.LinkName
	ad.BucketName = data.BucketName

	return nil
}

// AnalyticsIndex contains information about an analytics index.
type AnalyticsIndex struct {
	Name          string
	DatasetName   string
	DataverseName string
	IsPrimary     bool
}

// fromData populates the index from its JSON wire form. It never fails;
// the error return exists for interface symmetry with other fromData methods.
func (ai *AnalyticsIndex) fromData(data jsonAnalyticsIndex) error {
	ai.Name = data.IndexName
	ai.DatasetName = data.DatasetName
	ai.DataverseName = data.DataverseName
	ai.IsPrimary = data.IsPrimary

	return nil
}
// CreateAnalyticsDataverseOptions is the set of options available to the AnalyticsManager CreateDataverse operation.
type CreateAnalyticsDataverseOptions struct {
	IgnoreIfExists bool // when true, CREATE DATAVERSE uses IF NOT EXISTS

	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// CreateDataverse creates a new analytics dataverse.
//
// An empty dataverse name is rejected before any statement is sent. When
// opts.IgnoreIfExists is set, the statement uses IF NOT EXISTS so that an
// already-existing dataverse is not treated as an error.
func (am *AnalyticsIndexManager) CreateDataverse(dataverseName string, opts *CreateAnalyticsDataverseOptions) error {
	if opts == nil {
		opts = &CreateAnalyticsDataverseOptions{}
	}

	if dataverseName == "" {
		// Fixed: the original message incorrectly said "dataset".
		return invalidArgumentsError{
			message: "dataverse name cannot be empty",
		}
	}

	span := am.tracer.StartSpan("CreateDataverse", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfExists {
		ignoreStr = "IF NOT EXISTS"
	}

	q := fmt.Sprintf("CREATE DATAVERSE `%s` %s", dataverseName, ignoreStr)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return err
	}

	return nil
}
// DropAnalyticsDataverseOptions is the set of options available to the AnalyticsManager DropDataverse operation.
type DropAnalyticsDataverseOptions struct {
	IgnoreIfNotExists bool // when true, DROP DATAVERSE uses IF EXISTS

	Timeout       time.Duration
	RetryStrategy RetryStrategy
}
// DropDataverse drops an analytics dataverse.
//
// An empty dataverse name is rejected up front (matching CreateDataverse;
// the original silently sent a malformed statement). When
// opts.IgnoreIfNotExists is set, the statement uses IF EXISTS.
//
// NOTE(review): unlike CreateDataverse, the name is not backtick-quoted
// here; names needing escaping may behave differently — confirm intent.
func (am *AnalyticsIndexManager) DropDataverse(dataverseName string, opts *DropAnalyticsDataverseOptions) error {
	if opts == nil {
		opts = &DropAnalyticsDataverseOptions{}
	}

	if dataverseName == "" {
		return invalidArgumentsError{
			message: "dataverse name cannot be empty",
		}
	}

	span := am.tracer.StartSpan("DropDataverse", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfNotExists {
		ignoreStr = "IF EXISTS"
	}

	q := fmt.Sprintf("DROP DATAVERSE %s %s", dataverseName, ignoreStr)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return err
	}

	// Fixed: the original ended with "return err", which is always nil
	// here; return nil explicitly for clarity.
	return nil
}
// CreateAnalyticsDatasetOptions is the set of options available to the AnalyticsManager CreateDataset operation.
type CreateAnalyticsDatasetOptions struct {
	IgnoreIfExists bool
	// Condition is an optional filter; it may be given with or without a
	// leading WHERE keyword.
	Condition     string
	DataverseName string

	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// CreateDataset creates a new analytics dataset.
func (am *AnalyticsIndexManager) CreateDataset(datasetName, bucketName string, opts *CreateAnalyticsDatasetOptions) error {
	if opts == nil {
		opts = &CreateAnalyticsDatasetOptions{}
	}

	if datasetName == "" {
		return invalidArgumentsError{
			message: "dataset name cannot be empty",
		}
	}

	span := am.tracer.StartSpan("CreateDataset", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfExists {
		ignoreStr = "IF NOT EXISTS"
	}

	// Normalize the condition so the generated statement always contains
	// exactly one WHERE keyword.
	var where string
	if opts.Condition != "" {
		if !strings.HasPrefix(strings.ToUpper(opts.Condition), "WHERE") {
			where = "WHERE "
		}
		where += opts.Condition
	}

	// Qualify the dataset with its dataverse when one was supplied.
	if opts.DataverseName == "" {
		datasetName = fmt.Sprintf("`%s`", datasetName)
	} else {
		datasetName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
	}

	q := fmt.Sprintf("CREATE DATASET %s %s ON `%s` %s", ignoreStr, datasetName, bucketName, where)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return err
	}

	return nil
}
// DropAnalyticsDatasetOptions is the set of options available to the AnalyticsManager DropDataset operation.
type DropAnalyticsDatasetOptions struct {
	IgnoreIfNotExists bool
	DataverseName     string

	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// DropDataset drops an analytics dataset.
// Note: unlike CreateDataset, the dataset name is not validated for
// emptiness here.
func (am *AnalyticsIndexManager) DropDataset(datasetName string, opts *DropAnalyticsDatasetOptions) error {
	if opts == nil {
		opts = &DropAnalyticsDatasetOptions{}
	}

	span := am.tracer.StartSpan("DropDataset", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfNotExists {
		ignoreStr = "IF EXISTS"
	}

	// Qualify the dataset with its dataverse when one was supplied.
	if opts.DataverseName == "" {
		datasetName = fmt.Sprintf("`%s`", datasetName)
	} else {
		datasetName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
	}

	q := fmt.Sprintf("DROP DATASET %s %s", datasetName, ignoreStr)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return err
	}

	return nil
}
// GetAllAnalyticsDatasetsOptions is the set of options available to the AnalyticsManager GetAllDatasets operation.
type GetAllAnalyticsDatasetsOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetAllDatasets gets all analytics datasets.
// It queries the Metadata collection, excluding the system ("Metadata")
// dataverse itself.
func (am *AnalyticsIndexManager) GetAllDatasets(opts *GetAllAnalyticsDatasetsOptions) ([]AnalyticsDataset, error) {
	if opts == nil {
		opts = &GetAllAnalyticsDatasetsOptions{}
	}

	span := am.tracer.StartSpan("GetAllDatasets", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	q := "SELECT d.* FROM Metadata.`Dataset` d WHERE d.DataverseName <> \"Metadata\""
	rows, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return nil, err
	}

	// Decode each raw row into its public AnalyticsDataset form.
	datasets := make([]AnalyticsDataset, len(rows))
	for rowIdx, row := range rows {
		var datasetData jsonAnalyticsDataset
		err := json.Unmarshal(row, &datasetData)
		if err != nil {
			return nil, err
		}

		err = datasets[rowIdx].fromData(datasetData)
		if err != nil {
			return nil, err
		}
	}

	return datasets, nil
}
// CreateAnalyticsIndexOptions is the set of options available to the AnalyticsManager CreateIndex operation.
type CreateAnalyticsIndexOptions struct {
	IgnoreIfExists bool
	DataverseName  string

	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// CreateIndex creates a new analytics index.
// fields maps field names to their analytics types; at least one field is
// required.
func (am *AnalyticsIndexManager) CreateIndex(datasetName, indexName string, fields map[string]string, opts *CreateAnalyticsIndexOptions) error {
	if opts == nil {
		opts = &CreateAnalyticsIndexOptions{}
	}

	if indexName == "" {
		return invalidArgumentsError{
			message: "index name cannot be empty",
		}
	}
	if len(fields) <= 0 {
		return invalidArgumentsError{
			message: "you must specify at least one field to index",
		}
	}

	span := am.tracer.StartSpan("CreateIndex", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfExists {
		ignoreStr = "IF NOT EXISTS"
	}

	// Build the "name:type" field list; map iteration order is random, so
	// the field order in the statement is unspecified.
	var indexFields []string
	for name, typ := range fields {
		indexFields = append(indexFields, name+":"+typ)
	}

	// Qualify the dataset with its dataverse when one was supplied.
	if opts.DataverseName == "" {
		datasetName = fmt.Sprintf("`%s`", datasetName)
	} else {
		datasetName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
	}

	q := fmt.Sprintf("CREATE INDEX `%s` %s ON %s (%s)", indexName, ignoreStr, datasetName, strings.Join(indexFields, ","))
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return err
	}

	return nil
}
// DropAnalyticsIndexOptions is the set of options available to the AnalyticsManager DropIndex operation.
type DropAnalyticsIndexOptions struct {
	IgnoreIfNotExists bool
	DataverseName     string

	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// DropIndex drops an analytics index.
// Note: the index name is interpolated unquoted; names are not validated
// for emptiness here.
func (am *AnalyticsIndexManager) DropIndex(datasetName, indexName string, opts *DropAnalyticsIndexOptions) error {
	if opts == nil {
		opts = &DropAnalyticsIndexOptions{}
	}

	span := am.tracer.StartSpan("DropIndex", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	var ignoreStr string
	if opts.IgnoreIfNotExists {
		ignoreStr = "IF EXISTS"
	}

	// Qualify the dataset with its dataverse when one was supplied.
	if opts.DataverseName == "" {
		datasetName = fmt.Sprintf("`%s`", datasetName)
	} else {
		datasetName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
	}

	q := fmt.Sprintf("DROP INDEX %s.%s %s", datasetName, indexName, ignoreStr)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return err
	}

	return nil
}
// GetAllAnalyticsIndexesOptions is the set of options available to the AnalyticsManager GetAllIndexes operation.
type GetAllAnalyticsIndexesOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetAllIndexes gets all analytics indexes.
// It queries the Metadata collection, excluding the system ("Metadata")
// dataverse itself.
func (am *AnalyticsIndexManager) GetAllIndexes(opts *GetAllAnalyticsIndexesOptions) ([]AnalyticsIndex, error) {
	if opts == nil {
		opts = &GetAllAnalyticsIndexesOptions{}
	}

	span := am.tracer.StartSpan("GetAllIndexes", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	q := "SELECT d.* FROM Metadata.`Index` d WHERE d.DataverseName <> \"Metadata\""
	rows, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return nil, err
	}

	// Decode each raw row into its public AnalyticsIndex form.
	indexes := make([]AnalyticsIndex, len(rows))
	for rowIdx, row := range rows {
		var indexData jsonAnalyticsIndex
		err := json.Unmarshal(row, &indexData)
		if err != nil {
			return nil, err
		}

		err = indexes[rowIdx].fromData(indexData)
		if err != nil {
			return nil, err
		}
	}

	return indexes, nil
}
// ConnectAnalyticsLinkOptions is the set of options available to the AnalyticsManager ConnectLink operation.
type ConnectAnalyticsLinkOptions struct {
	LinkName string // defaults to "Local" when empty

	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// ConnectLink connects an analytics link.
func (am *AnalyticsIndexManager) ConnectLink(opts *ConnectAnalyticsLinkOptions) error {
	if opts == nil {
		opts = &ConnectAnalyticsLinkOptions{}
	}

	span := am.tracer.StartSpan("ConnectLink", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	if opts.LinkName == "" {
		opts.LinkName = "Local"
	}

	q := fmt.Sprintf("CONNECT LINK %s", opts.LinkName)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return err
	}

	return nil
}
// DisconnectAnalyticsLinkOptions is the set of options available to the AnalyticsManager DisconnectLink operation.
type DisconnectAnalyticsLinkOptions struct {
	LinkName string // defaults to "Local" when empty

	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// DisconnectLink disconnects an analytics link.
func (am *AnalyticsIndexManager) DisconnectLink(opts *DisconnectAnalyticsLinkOptions) error {
	if opts == nil {
		opts = &DisconnectAnalyticsLinkOptions{}
	}

	span := am.tracer.StartSpan("DisconnectLink", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	if opts.LinkName == "" {
		opts.LinkName = "Local"
	}

	q := fmt.Sprintf("DISCONNECT LINK %s", opts.LinkName)
	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    span,
	})
	if err != nil {
		return err
	}

	return nil
}
// GetPendingMutationsAnalyticsOptions is the set of options available to the user manager GetPendingMutations operation.
type GetPendingMutationsAnalyticsOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetPendingMutations returns the number of pending mutations for all indexes in the form of dataverse.dataset:mutations.
func (am *AnalyticsIndexManager) GetPendingMutations(opts *GetPendingMutationsAnalyticsOptions) (map[string]uint64, error) {
	if opts == nil {
		opts = &GetPendingMutationsAnalyticsOptions{}
	}

	span := am.tracer.StartSpan("GetPendingMutations", nil).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	timeout := opts.Timeout
	if timeout == 0 {
		timeout = am.globalTimeout
	}

	req := mgmtRequest{
		Service:       ServiceTypeAnalytics,
		Method:        "GET",
		Path:          "/analytics/node/agg/stats/remaining",
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       timeout,
		parentSpan:    span.Context(),
	}
	resp, err := am.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	// Always release the response body, regardless of which path we leave
	// by. (The original closed it only on the success path, leaking the
	// connection on a bad status or a decode failure.)
	defer func() {
		if closeErr := resp.Body.Close(); closeErr != nil {
			logDebugf("Failed to close socket (%s)", closeErr)
		}
	}()

	if resp.StatusCode != 200 {
		return nil, makeMgmtBadStatusError("failed to get pending mutations", &req, resp)
	}

	pending := make(map[string]uint64)
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&pending)
	if err != nil {
		return nil, err
	}

	return pending, nil
}

View File

@@ -1,300 +0,0 @@
package gocb
import (
"encoding/json"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// jsonAnalyticsMetrics mirrors the wire form of the "metrics" section of
// an analytics response. Durations arrive as Go-parseable strings.
type jsonAnalyticsMetrics struct {
	ElapsedTime      string `json:"elapsedTime"`
	ExecutionTime    string `json:"executionTime"`
	ResultCount      uint64 `json:"resultCount"`
	ResultSize       uint64 `json:"resultSize"`
	MutationCount    uint64 `json:"mutationCount,omitempty"`
	SortCount        uint64 `json:"sortCount,omitempty"`
	ErrorCount       uint64 `json:"errorCount,omitempty"`
	WarningCount     uint64 `json:"warningCount,omitempty"`
	ProcessedObjects uint64 `json:"processedObjects,omitempty"`
}

// jsonAnalyticsWarning mirrors the wire form of a single warning entry.
type jsonAnalyticsWarning struct {
	Code    uint32 `json:"code"`
	Message string `json:"msg"`
}

// jsonAnalyticsResponse mirrors the wire form of the analytics response
// envelope (metadata only; rows are streamed separately).
type jsonAnalyticsResponse struct {
	RequestID       string                 `json:"requestID"`
	ClientContextID string                 `json:"clientContextID"`
	Status          string                 `json:"status"`
	Warnings        []jsonAnalyticsWarning `json:"warnings"`
	Metrics         jsonAnalyticsMetrics   `json:"metrics"`
	Signature       interface{}            `json:"signature"`
}
// AnalyticsMetrics encapsulates various metrics gathered during a queries execution.
type AnalyticsMetrics struct {
	ElapsedTime      time.Duration
	ExecutionTime    time.Duration
	ResultCount      uint64
	ResultSize       uint64
	MutationCount    uint64
	SortCount        uint64
	ErrorCount       uint64
	WarningCount     uint64
	ProcessedObjects uint64
}

// fromData converts the wire-form metrics into their public form.
// Unparseable duration strings are logged and left as zero rather than
// failing the whole conversion; the method never returns a non-nil error.
func (metrics *AnalyticsMetrics) fromData(data jsonAnalyticsMetrics) error {
	elapsedTime, err := time.ParseDuration(data.ElapsedTime)
	if err != nil {
		logDebugf("Failed to parse query metrics elapsed time: %s", err)
	}

	executionTime, err := time.ParseDuration(data.ExecutionTime)
	if err != nil {
		logDebugf("Failed to parse query metrics execution time: %s", err)
	}

	metrics.ElapsedTime = elapsedTime
	metrics.ExecutionTime = executionTime
	metrics.ResultCount = data.ResultCount
	metrics.ResultSize = data.ResultSize
	metrics.MutationCount = data.MutationCount
	metrics.SortCount = data.SortCount
	metrics.ErrorCount = data.ErrorCount
	metrics.WarningCount = data.WarningCount
	metrics.ProcessedObjects = data.ProcessedObjects

	return nil
}
// AnalyticsWarning encapsulates any warnings returned by a query.
type AnalyticsWarning struct {
	Code    uint32
	Message string
}

// fromData converts the wire-form warning into its public form. It never
// fails; the error return exists for symmetry with other fromData methods.
func (warning *AnalyticsWarning) fromData(data jsonAnalyticsWarning) error {
	*warning = AnalyticsWarning{
		Code:    data.Code,
		Message: data.Message,
	}
	return nil
}
// AnalyticsMetaData provides access to the meta-data properties of a query result.
type AnalyticsMetaData struct {
	RequestID       string
	ClientContextID string
	Metrics         AnalyticsMetrics
	Signature       interface{}
	Warnings        []AnalyticsWarning
}

// fromData converts the wire-form response envelope into public metadata,
// converting nested metrics and warnings along the way.
func (meta *AnalyticsMetaData) fromData(data jsonAnalyticsResponse) error {
	metrics := AnalyticsMetrics{}
	if err := metrics.fromData(data.Metrics); err != nil {
		return err
	}

	warnings := make([]AnalyticsWarning, len(data.Warnings))
	for wIdx, jsonWarning := range data.Warnings {
		err := warnings[wIdx].fromData(jsonWarning)
		if err != nil {
			return err
		}
	}

	meta.RequestID = data.RequestID
	meta.ClientContextID = data.ClientContextID
	meta.Metrics = metrics
	meta.Signature = data.Signature
	meta.Warnings = warnings

	return nil
}
// AnalyticsResult allows access to the results of a query.
type AnalyticsResult struct {
	reader analyticsRowReader

	rowBytes []byte // raw bytes of the current row, set by Next
}

// newAnalyticsResult wraps a row reader in a public result object.
func newAnalyticsResult(reader analyticsRowReader) *AnalyticsResult {
	return &AnalyticsResult{
		reader: reader,
	}
}

// analyticsRowReader is the streaming interface AnalyticsResult consumes;
// satisfied by the gocbcore analytics row stream.
type analyticsRowReader interface {
	NextRow() []byte
	Err() error
	MetaData() ([]byte, error)
	Close() error
}
// Next advances to the next row, reporting whether one was available.
// On success the row's raw bytes are cached for Row to decode.
func (r *AnalyticsResult) Next() bool {
	if row := r.reader.NextRow(); row != nil {
		r.rowBytes = row
		return true
	}
	return false
}
// Row decodes the current row into valuePtr. Passing a *json.RawMessage
// receives the raw bytes without a decode pass. ErrNoResult is returned
// when Next has not yet produced a row.
func (r *AnalyticsResult) Row(valuePtr interface{}) error {
	if r.rowBytes == nil {
		return ErrNoResult
	}

	raw, isRaw := valuePtr.(*json.RawMessage)
	if isRaw {
		*raw = r.rowBytes
		return nil
	}
	return json.Unmarshal(r.rowBytes, valuePtr)
}
// Err returns any errors that have occurred on the stream
func (r *AnalyticsResult) Err() error {
	return r.reader.Err()
}

// Close marks the results as closed, returning any errors that occurred during reading the results.
func (r *AnalyticsResult) Close() error {
	return r.reader.Close()
}
// One decodes the first row into valuePtr, returning ErrNoResult when the
// stream is empty. The remaining rows are drained so the stream is fully
// consumed; use it only for very small result sets (ideally length 1).
func (r *AnalyticsResult) One(valuePtr interface{}) error {
	first := r.reader.NextRow()
	if first == nil {
		return ErrNoResult
	}

	// Drain whatever rows remain; their contents are discarded.
	for r.reader.NextRow() != nil {
	}

	return json.Unmarshal(first, valuePtr)
}
// MetaData returns any meta-data that was available from this query. Note that
// the meta-data will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *AnalyticsResult) MetaData() (*AnalyticsMetaData, error) {
	metaDataBytes, err := r.reader.MetaData()
	if err != nil {
		return nil, err
	}

	var jsonResp jsonAnalyticsResponse
	err = json.Unmarshal(metaDataBytes, &jsonResp)
	if err != nil {
		return nil, err
	}

	var metaData AnalyticsMetaData
	err = metaData.fromData(jsonResp)
	if err != nil {
		return nil, err
	}

	return &metaData, nil
}
// AnalyticsQuery executes the analytics query statement on the server.
// Option defaults come from the cluster configuration: timeout falls back
// to the configured analytics timeout and the retry strategy to the
// cluster-wide wrapper.
func (c *Cluster) AnalyticsQuery(statement string, opts *AnalyticsOptions) (*AnalyticsResult, error) {
	if opts == nil {
		opts = &AnalyticsOptions{}
	}

	span := c.tracer.StartSpan("Query", opts.parentSpan).
		SetTag("couchbase.service", "analytics")
	defer span.Finish()

	timeout := opts.Timeout
	if opts.Timeout == 0 {
		timeout = c.timeoutsConfig.AnalyticsTimeout
	}
	deadline := time.Now().Add(timeout)

	retryStrategy := c.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		retryStrategy = newRetryStrategyWrapper(opts.RetryStrategy)
	}

	queryOpts, err := opts.toMap()
	if err != nil {
		return nil, AnalyticsError{
			InnerError:      wrapError(err, "failed to generate query options"),
			Statement:       statement,
			ClientContextID: opts.ClientContextID,
		}
	}

	// Priority is encoded as -1 on the wire when requested, 0 otherwise.
	var priorityInt int32
	if opts.Priority {
		priorityInt = -1
	}

	queryOpts["statement"] = statement

	return c.execAnalyticsQuery(span, queryOpts, priorityInt, deadline, retryStrategy)
}
// maybeGetAnalyticsOption returns the named option when it is present and
// a string; otherwise it returns the empty string.
func maybeGetAnalyticsOption(options map[string]interface{}, name string) string {
	value, ok := options[name].(string)
	if !ok {
		return ""
	}
	return value
}
// execAnalyticsQuery marshals the assembled query options and dispatches the
// request via the cluster's analytics provider.
// priority is the server-side scheduling priority (-1 = high, 0 = normal);
// deadline bounds the whole operation.
func (c *Cluster) execAnalyticsQuery(
	span requestSpan,
	options map[string]interface{},
	priority int32,
	deadline time.Time,
	retryStrategy *retryStrategyWrapper,
) (*AnalyticsResult, error) {
	provider, err := c.getAnalyticsProvider()
	if err != nil {
		return nil, AnalyticsError{
			InnerError:      wrapError(err, "failed to get query provider"),
			Statement:       maybeGetAnalyticsOption(options, "statement"),
			ClientContextID: maybeGetAnalyticsOption(options, "client_context_id"),
		}
	}

	// The whole options map (statement included) becomes the JSON request body.
	reqBytes, err := json.Marshal(options)
	if err != nil {
		return nil, AnalyticsError{
			InnerError:      wrapError(err, "failed to marshall query body"),
			Statement:       maybeGetAnalyticsOption(options, "statement"),
			ClientContextID: maybeGetAnalyticsOption(options, "client_context_id"),
		}
	}

	res, err := provider.AnalyticsQuery(gocbcore.AnalyticsQueryOptions{
		Payload:       reqBytes,
		Priority:      int(priority),
		RetryStrategy: retryStrategy,
		Deadline:      deadline,
		TraceContext:  span.Context(),
	})
	if err != nil {
		return nil, maybeEnhanceAnalyticsError(err)
	}

	return newAnalyticsResult(res), nil
}

View File

@@ -1,600 +0,0 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"time"
"github.com/google/uuid"
"github.com/pkg/errors"
)
// BucketType specifies the kind of bucket.
type BucketType string

const (
	// CouchbaseBucketType indicates a Couchbase bucket type.
	CouchbaseBucketType BucketType = "membase"

	// MemcachedBucketType indicates a Memcached bucket type.
	MemcachedBucketType BucketType = "memcached"

	// EphemeralBucketType indicates an Ephemeral bucket type.
	EphemeralBucketType BucketType = "ephemeral"
)

// ConflictResolutionType specifies the kind of conflict resolution to use for a bucket.
type ConflictResolutionType string

const (
	// ConflictResolutionTypeTimestamp specifies to use timestamp conflict resolution on the bucket.
	ConflictResolutionTypeTimestamp ConflictResolutionType = "lww"

	// ConflictResolutionTypeSequenceNumber specifies to use sequence number conflict resolution on the bucket.
	ConflictResolutionTypeSequenceNumber ConflictResolutionType = "seqno"
)

// EvictionPolicyType specifies the kind of eviction policy to use for a bucket.
type EvictionPolicyType string

const (
	// EvictionPolicyTypeFull specifies to use full eviction for a couchbase bucket.
	EvictionPolicyTypeFull EvictionPolicyType = "fullEviction"

	// EvictionPolicyTypeValueOnly specifies to use value only eviction for a couchbase bucket.
	EvictionPolicyTypeValueOnly EvictionPolicyType = "valueOnly"

	// EvictionPolicyTypeNotRecentlyUsed specifies to use not recently used (nru) eviction for an ephemeral bucket.
	// UNCOMMITTED: This API may change in the future.
	EvictionPolicyTypeNotRecentlyUsed EvictionPolicyType = "nruEviction"

	// EvictionPolicyTypeNoEviction specifies to use no eviction for an ephemeral bucket.
	// UNCOMMITTED: This API may change in the future.
	EvictionPolicyTypeNoEviction EvictionPolicyType = "noEviction"
)

// CompressionMode specifies the kind of compression to use for a bucket.
type CompressionMode string

const (
	// CompressionModeOff specifies to use no compression for a bucket.
	CompressionModeOff CompressionMode = "off"

	// CompressionModePassive specifies to use passive compression for a bucket.
	CompressionModePassive CompressionMode = "passive"

	// CompressionModeActive specifies to use active compression for a bucket.
	CompressionModeActive CompressionMode = "active"
)
// jsonBucketSettings mirrors the JSON payload returned by the
// /pools/default/buckets management endpoints.
type jsonBucketSettings struct {
	Name        string `json:"name"`
	Controllers struct {
		Flush string `json:"flush"`
	} `json:"controllers"`
	ReplicaIndex bool `json:"replicaIndex"`
	Quota        struct {
		RAM    uint64 `json:"ram"`
		RawRAM uint64 `json:"rawRAM"`
	} `json:"quota"`
	ReplicaNumber          uint32 `json:"replicaNumber"`
	BucketType             string `json:"bucketType"`
	ConflictResolutionType string `json:"conflictResolutionType"`
	EvictionPolicy         string `json:"evictionPolicy"`
	MaxTTL                 uint32 `json:"maxTTL"`
	CompressionMode        string `json:"compressionMode"`
}

// BucketSettings holds information about the settings for a bucket.
type BucketSettings struct {
	Name                 string
	FlushEnabled         bool
	ReplicaIndexDisabled bool // inverted so that zero value matches server default.
	RAMQuotaMB           uint64
	NumReplicas          uint32     // NOTE: If not set this will set 0 replicas.
	BucketType           BucketType // Defaults to CouchbaseBucketType.
	EvictionPolicy       EvictionPolicyType
	MaxTTL               time.Duration
	CompressionMode      CompressionMode
}
// fromData populates the settings from a decoded server payload.
// NOTE: the receiver's scalar fields are assigned before the bucket type is
// validated, so on an "unrecognized bucket type" error the receiver has been
// partially mutated — callers discard it on error.
func (bs *BucketSettings) fromData(data jsonBucketSettings) error {
	bs.Name = data.Name
	// The server signals flush support by exposing a flush controller URI.
	bs.FlushEnabled = data.Controllers.Flush != ""
	bs.ReplicaIndexDisabled = !data.ReplicaIndex
	// RawRAM is reported in bytes; expose it as MB.
	bs.RAMQuotaMB = data.Quota.RawRAM / 1024 / 1024
	bs.NumReplicas = data.ReplicaNumber
	bs.EvictionPolicy = EvictionPolicyType(data.EvictionPolicy)
	// MaxTTL arrives in whole seconds.
	bs.MaxTTL = time.Duration(data.MaxTTL) * time.Second
	bs.CompressionMode = CompressionMode(data.CompressionMode)
	switch data.BucketType {
	case "membase":
		bs.BucketType = CouchbaseBucketType
	case "memcached":
		bs.BucketType = MemcachedBucketType
	case "ephemeral":
		bs.BucketType = EphemeralBucketType
	default:
		return errors.New("unrecognized bucket type string")
	}

	return nil
}
// bucketMgrErrorResp is the JSON error envelope returned by the bucket
// management REST endpoints: a map of field name -> error message.
type bucketMgrErrorResp struct {
	Errors map[string]string `json:"errors"`
}

// tryParseErrorMessage turns a non-2xx management response body into a
// meaningful error, recognising well-known conditions such as bucket-not-found
// and bucket-already-exists. It returns nil when the body cannot be read,
// letting the caller fall back to a generic bad-status error.
func (bm *BucketManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("Failed to read bucket manager response body: %s", err)
		return nil
	}

	if resp.StatusCode == 404 {
		// If it was a 404 then there's no chance of the response body containing any structure
		if strings.Contains(strings.ToLower(string(b)), "resource not found") {
			return makeGenericMgmtError(ErrBucketNotFound, req, resp)
		}

		return makeGenericMgmtError(errors.New(string(b)), req, resp)
	}

	var mgrErr bucketMgrErrorResp
	err = json.Unmarshal(b, &mgrErr)
	if err != nil {
		logDebugf("Failed to unmarshal error body: %s", err)
		return makeGenericMgmtError(errors.New(string(b)), req, resp)
	}

	// Pick an arbitrary entry from the errors map; map iteration order is not
	// deterministic, but any entry is representative enough to classify.
	var firstErr string
	for _, msg := range mgrErr.Errors {
		firstErr = strings.ToLower(msg)
		break
	}
	if firstErr == "" {
		// The body decoded but carried no error entries; surface the raw body
		// instead of an empty error message.
		return makeGenericMgmtError(errors.New(string(b)), req, resp)
	}

	var bodyErr error
	if strings.Contains(firstErr, "bucket with given name already exists") {
		bodyErr = ErrBucketExists
	} else {
		bodyErr = errors.New(firstErr)
	}

	return makeGenericMgmtError(bodyErr, req, resp)
}
// tryParseFlushErrorMessage interprets an error response from the flush
// endpoint. Flush doesn't use the same body format as anything else: errors
// arrive as a flat map of string messages, with the "_" key carrying the
// bucket-level message.
func (bm *BucketManager) tryParseFlushErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("Failed to read bucket manager response body: %s", err)
		return makeMgmtBadStatusError("failed to flush bucket", req, resp)
	}

	var bodyErrMsgs map[string]string
	err = json.Unmarshal(b, &bodyErrMsgs)
	if err != nil {
		// Not the expected JSON shape; surface the raw body.
		return errors.New(string(b))
	}

	if errMsg, ok := bodyErrMsgs["_"]; ok {
		if strings.Contains(strings.ToLower(errMsg), "flush is disabled") {
			return ErrBucketNotFlushable
		}
	}

	return errors.New(string(b))
}
// BucketManager provides methods for performing bucket management operations.
// See BucketManager for methods that allow creating and removing buckets themselves.
type BucketManager struct {
	provider mgmtProvider // executes the management REST requests
	tracer   requestTracer
}
// GetBucketOptions is the set of options available to the bucket manager GetBucket operation.
type GetBucketOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetBucket returns settings for a bucket on the cluster.
// A nil opts is treated as the zero value.
func (bm *BucketManager) GetBucket(bucketName string, opts *GetBucketOptions) (*BucketSettings, error) {
	if opts == nil {
		opts = &GetBucketOptions{}
	}

	span := bm.tracer.StartSpan("GetBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	// Delegates to the unexported get so other operations can reuse it.
	return bm.get(span.Context(), bucketName, opts.RetryStrategy, opts.Timeout)
}
// get performs GET /pools/default/buckets/<name> and decodes the response
// into BucketSettings.
func (bm *BucketManager) get(tracectx requestSpanContext, bucketName string,
	strategy RetryStrategy, timeout time.Duration) (*BucketSettings, error) {

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s", bucketName),
		Method:        "GET",
		IsIdempotent:  true, // a read; safe to retry
		RetryStrategy: strategy,
		UniqueID:      uuid.New().String(),
		Timeout:       timeout,
		parentSpan:    tracectx,
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Prefer a parsed, typed error (e.g. ErrBucketNotFound) when possible.
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return nil, bktErr
		}

		return nil, makeMgmtBadStatusError("failed to get bucket", &req, resp)
	}

	var bucketData jsonBucketSettings
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&bucketData)
	if err != nil {
		return nil, err
	}

	var settings BucketSettings
	err = settings.fromData(bucketData)
	if err != nil {
		return nil, err
	}

	return &settings, nil
}
// GetAllBucketsOptions is the set of options available to the bucket manager GetAll operation.
type GetAllBucketsOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetAllBuckets returns a list of all active buckets on the cluster,
// keyed by bucket name. A nil opts is treated as the zero value.
func (bm *BucketManager) GetAllBuckets(opts *GetAllBucketsOptions) (map[string]BucketSettings, error) {
	if opts == nil {
		opts = &GetAllBucketsOptions{}
	}

	span := bm.tracer.StartSpan("GetAllBuckets", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          "/pools/default/buckets",
		Method:        "GET",
		IsIdempotent:  true, // a read; safe to retry
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return nil, makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return nil, bktErr
		}

		return nil, makeMgmtBadStatusError("failed to get all buckets", &req, resp)
	}

	var bucketsData []*jsonBucketSettings
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&bucketsData)
	if err != nil {
		return nil, err
	}

	buckets := make(map[string]BucketSettings, len(bucketsData))
	for _, bucketData := range bucketsData {
		var bucket BucketSettings
		err := bucket.fromData(*bucketData)
		if err != nil {
			return nil, err
		}

		buckets[bucket.Name] = bucket
	}

	return buckets, nil
}
// CreateBucketSettings are the settings available when creating a bucket.
type CreateBucketSettings struct {
	BucketSettings
	// ConflictResolutionType can only be set at creation time, hence it is
	// not part of the shared BucketSettings.
	ConflictResolutionType ConflictResolutionType
}

// CreateBucketOptions is the set of options available to the bucket manager CreateBucket operation.
type CreateBucketOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// CreateBucket creates a bucket on the cluster.
// A nil opts is treated as the zero value.
func (bm *BucketManager) CreateBucket(settings CreateBucketSettings, opts *CreateBucketOptions) error {
	if opts == nil {
		opts = &CreateBucketOptions{}
	}

	span := bm.tracer.StartSpan("CreateBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	posts, err := bm.settingsToPostData(&settings.BucketSettings)
	if err != nil {
		return err
	}

	if settings.ConflictResolutionType != "" {
		posts.Add("conflictResolutionType", string(settings.ConflictResolutionType))
	}

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          "/pools/default/buckets",
		Method:        "POST",
		Body:          []byte(posts.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	// Bucket creation is asynchronous server-side; success is 202 Accepted.
	if resp.StatusCode != 202 {
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return bktErr
		}

		return makeMgmtBadStatusError("failed to create bucket", &req, resp)
	}

	return nil
}
// UpdateBucketOptions is the set of options available to the bucket manager UpdateBucket operation.
type UpdateBucketOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// UpdateBucket updates a bucket on the cluster.
// A nil opts is treated as the zero value.
func (bm *BucketManager) UpdateBucket(settings BucketSettings, opts *UpdateBucketOptions) error {
	if opts == nil {
		opts = &UpdateBucketOptions{}
	}

	span := bm.tracer.StartSpan("UpdateBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	posts, err := bm.settingsToPostData(&settings)
	if err != nil {
		return err
	}

	// Updating reuses the creation form encoding, POSTed to the bucket path.
	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s", settings.Name),
		Method:        "POST",
		Body:          []byte(posts.Encode()),
		ContentType:   "application/x-www-form-urlencoded",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return bktErr
		}

		return makeMgmtBadStatusError("failed to update bucket", &req, resp)
	}

	return nil
}
// DropBucketOptions is the set of options available to the bucket manager DropBucket operation.
type DropBucketOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// DropBucket will delete a bucket from the cluster by name.
// A nil opts is treated as the zero value.
func (bm *BucketManager) DropBucket(name string, opts *DropBucketOptions) error {
	if opts == nil {
		opts = &DropBucketOptions{}
	}

	span := bm.tracer.StartSpan("DropBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s", name),
		Method:        "DELETE",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		bktErr := bm.tryParseErrorMessage(&req, resp)
		if bktErr != nil {
			return bktErr
		}

		return makeMgmtBadStatusError("failed to drop bucket", &req, resp)
	}

	return nil
}
// FlushBucketOptions is the set of options available to the bucket manager FlushBucket operation.
type FlushBucketOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// FlushBucket will delete all the of the data from a bucket.
// Keep in mind that you must have flushing enabled in the buckets configuration.
func (bm *BucketManager) FlushBucket(name string, opts *FlushBucketOptions) error {
	if opts == nil {
		opts = &FlushBucketOptions{}
	}

	span := bm.tracer.StartSpan("FlushBucket", nil).
		SetTag("couchbase.service", "mgmt")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeManagement,
		Path:          fmt.Sprintf("/pools/default/buckets/%s/controller/doFlush", name),
		Method:        "POST",
		RetryStrategy: opts.RetryStrategy,
		UniqueID:      uuid.New().String(),
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := bm.provider.executeMgmtRequest(req)
	if err != nil {
		return makeGenericMgmtError(err, &req, resp)
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Flush has its own error body format (see tryParseFlushErrorMessage).
		return bm.tryParseFlushErrorMessage(&req, resp)
	}

	return nil
}
// settingsToPostData validates settings and converts them into the
// form-encoded body expected by the bucket management REST API.
func (bm *BucketManager) settingsToPostData(settings *BucketSettings) (url.Values, error) {
	posts := url.Values{}

	if settings.Name == "" {
		return nil, makeInvalidArgumentsError("Name invalid, must be set.")
	}

	if settings.RAMQuotaMB < 100 {
		// The check accepts exactly 100MB, so the message says "at least"
		// (the previous "greater than" wording contradicted the condition).
		return nil, makeInvalidArgumentsError("Memory quota invalid, must be at least 100MB")
	}

	if settings.MaxTTL > 0 && settings.BucketType == MemcachedBucketType {
		return nil, makeInvalidArgumentsError("maxTTL is not supported for memcached buckets")
	}

	posts.Add("name", settings.Name)
	// posts.Add("saslPassword", settings.Password)

	if settings.FlushEnabled {
		posts.Add("flushEnabled", "1")
	} else {
		posts.Add("flushEnabled", "0")
	}

	// replicaIndex can't be set at all on ephemeral buckets.
	if settings.BucketType != EphemeralBucketType {
		if settings.ReplicaIndexDisabled {
			posts.Add("replicaIndex", "0")
		} else {
			posts.Add("replicaIndex", "1")
		}
	}

	switch settings.BucketType {
	case CouchbaseBucketType:
		posts.Add("bucketType", string(settings.BucketType))
		posts.Add("replicaNumber", fmt.Sprintf("%d", settings.NumReplicas))
	case MemcachedBucketType:
		posts.Add("bucketType", string(settings.BucketType))
		if settings.NumReplicas > 0 {
			return nil, makeInvalidArgumentsError("replicas cannot be used with memcached buckets")
		}
	case EphemeralBucketType:
		posts.Add("bucketType", string(settings.BucketType))
		posts.Add("replicaNumber", fmt.Sprintf("%d", settings.NumReplicas))
	default:
		return nil, makeInvalidArgumentsError("Unrecognized bucket type")
	}

	posts.Add("ramQuotaMB", fmt.Sprintf("%d", settings.RAMQuotaMB))

	if settings.EvictionPolicy != "" {
		switch settings.BucketType {
		case MemcachedBucketType:
			return nil, makeInvalidArgumentsError("eviction policy is not valid for memcached buckets")
		case CouchbaseBucketType:
			// Couchbase buckets accept only the full / value-only policies.
			if settings.EvictionPolicy == EvictionPolicyTypeNoEviction || settings.EvictionPolicy == EvictionPolicyTypeNotRecentlyUsed {
				return nil, makeInvalidArgumentsError("eviction policy is not valid for couchbase buckets")
			}
		case EphemeralBucketType:
			// Ephemeral buckets accept only the noEviction / nruEviction policies.
			if settings.EvictionPolicy == EvictionPolicyTypeFull || settings.EvictionPolicy == EvictionPolicyTypeValueOnly {
				return nil, makeInvalidArgumentsError("eviction policy is not valid for ephemeral buckets")
			}
		}
		posts.Add("evictionPolicy", string(settings.EvictionPolicy))
	}

	if settings.MaxTTL > 0 {
		// maxTTL is sent in whole seconds.
		posts.Add("maxTTL", fmt.Sprintf("%d", settings.MaxTTL/time.Second))
	}

	if settings.CompressionMode != "" {
		posts.Add("compressionMode", string(settings.CompressionMode))
	}

	return posts, nil
}

View File

@@ -1,128 +0,0 @@
package gocb
import (
"encoding/json"
"time"
"github.com/couchbase/gocbcore/v9"
"github.com/google/uuid"
)
// EndPointDiagnostics represents a single entry in a diagnostics report.
type EndPointDiagnostics struct {
	Type         ServiceType
	ID           string
	Local        string
	Remote       string
	LastActivity time.Time
	State        EndpointState
	Namespace    string
}

// DiagnosticsResult encapsulates the results of a Diagnostics operation.
type DiagnosticsResult struct {
	ID       string
	Services map[string][]EndPointDiagnostics
	sdk      string // SDK identifier embedded in the marshalled report
	State    ClusterState
}

// jsonDiagnosticEntry is the wire form of a single service entry in the
// marshalled diagnostics report.
type jsonDiagnosticEntry struct {
	ID             string `json:"id,omitempty"`
	LastActivityUs uint64 `json:"last_activity_us,omitempty"`
	Remote         string `json:"remote,omitempty"`
	Local          string `json:"local,omitempty"`
	State          string `json:"state,omitempty"`
	Details        string `json:"details,omitempty"`
	Namespace      string `json:"namespace,omitempty"`
}

// jsonDiagnosticReport is the wire form of a whole diagnostics report.
type jsonDiagnosticReport struct {
	Version  int16                            `json:"version"`
	SDK      string                           `json:"sdk,omitempty"`
	ID       string                           `json:"id,omitempty"`
	Services map[string][]jsonDiagnosticEntry `json:"services"`
	State    string                           `json:"state"`
}
// MarshalJSON generates a JSON representation of this diagnostics report
// (report format version 2).
func (report *DiagnosticsResult) MarshalJSON() ([]byte, error) {
	jsonReport := jsonDiagnosticReport{
		Version:  2,
		SDK:      report.sdk,
		ID:       report.ID,
		Services: make(map[string][]jsonDiagnosticEntry),
		State:    clusterStateToString(report.State),
	}

	for _, serviceType := range report.Services {
		for _, service := range serviceType {
			serviceStr := serviceTypeToString(service.Type)
			stateStr := endpointStateToString(service.State)

			jsonReport.Services[serviceStr] = append(jsonReport.Services[serviceStr], jsonDiagnosticEntry{
				ID: service.ID,
				// Reported relative to the time of marshalling, not of collection.
				LastActivityUs: uint64(time.Since(service.LastActivity).Nanoseconds()),
				Remote:         service.Remote,
				Local:          service.Local,
				State:          stateStr,
				Details:        "",
				Namespace:      service.Namespace,
			})
		}
	}

	return json.Marshal(&jsonReport)
}
// DiagnosticsOptions are the options that are available for use with the Diagnostics operation.
type DiagnosticsOptions struct {
	// ReportID labels the report; a random UUID is generated when empty.
	ReportID string
}

// Diagnostics returns information about the internal state of the SDK.
// It does not perform any network I/O; the data comes from the core agent's
// view of its existing connections.
func (c *Cluster) Diagnostics(opts *DiagnosticsOptions) (*DiagnosticsResult, error) {
	if opts == nil {
		opts = &DiagnosticsOptions{}
	}

	if opts.ReportID == "" {
		opts.ReportID = uuid.New().String()
	}

	provider, err := c.getDiagnosticsProvider()
	if err != nil {
		return nil, err
	}

	agentReport, err := provider.Diagnostics(gocbcore.DiagnosticsOptions{})
	if err != nil {
		return nil, err
	}

	report := &DiagnosticsResult{
		ID:       opts.ReportID,
		Services: make(map[string][]EndPointDiagnostics),
		sdk:      Identifier(),
		State:    ClusterState(agentReport.State),
	}

	// Only KV (memcached) connections are surfaced by the core diagnostics.
	report.Services["kv"] = make([]EndPointDiagnostics, 0)

	for _, conn := range agentReport.MemdConns {
		state := EndpointState(conn.State)

		report.Services["kv"] = append(report.Services["kv"], EndPointDiagnostics{
			Type:         ServiceTypeKeyValue,
			State:        state,
			Local:        conn.LocalAddr,
			Remote:       conn.RemoteAddr,
			LastActivity: conn.LastActivity,
			Namespace:    conn.Scope,
			ID:           conn.ID,
		})
	}

	return report, nil
}

View File

@@ -1,92 +0,0 @@
package gocb
import (
"time"
"github.com/couchbase/gocbcore/v9"
"github.com/google/uuid"
)
// Ping will ping a list of services and verify they are active and
// responding in an acceptable period of time.
// A nil opts is treated as the zero value; the work is delegated to the
// package-level ping helper.
func (c *Cluster) Ping(opts *PingOptions) (*PingResult, error) {
	if opts == nil {
		opts = &PingOptions{}
	}

	provider, err := c.getDiagnosticsProvider()
	if err != nil {
		return nil, err
	}

	return ping(provider, opts, c.timeoutsConfig)
}
// ping issues the core-level ping and converts the per-service results into
// the public PingResult shape. When opts.Timeout is zero each service gets
// its own configured timeout; otherwise the single timeout applies to all.
func ping(provider diagnosticsProvider, opts *PingOptions, timeouts TimeoutsConfig) (*PingResult, error) {
	services := opts.ServiceTypes

	gocbcoreServices := make([]gocbcore.ServiceType, len(services))
	for i, svc := range services {
		gocbcoreServices[i] = gocbcore.ServiceType(svc)
	}

	coreopts := gocbcore.PingOptions{
		ServiceTypes: gocbcoreServices,
	}
	now := time.Now()
	timeout := opts.Timeout
	if timeout == 0 {
		// No explicit timeout: use the per-service defaults from config.
		coreopts.KVDeadline = now.Add(timeouts.KVTimeout)
		coreopts.CapiDeadline = now.Add(timeouts.ViewTimeout)
		coreopts.N1QLDeadline = now.Add(timeouts.QueryTimeout)
		coreopts.CbasDeadline = now.Add(timeouts.AnalyticsTimeout)
		coreopts.FtsDeadline = now.Add(timeouts.SearchTimeout)
		coreopts.MgmtDeadline = now.Add(timeouts.ManagementTimeout)
	} else {
		coreopts.KVDeadline = now.Add(timeout)
		coreopts.CapiDeadline = now.Add(timeout)
		coreopts.N1QLDeadline = now.Add(timeout)
		coreopts.CbasDeadline = now.Add(timeout)
		coreopts.FtsDeadline = now.Add(timeout)
		coreopts.MgmtDeadline = now.Add(timeout)
	}

	id := opts.ReportID
	if id == "" {
		id = uuid.New().String()
	}

	result, err := provider.Ping(coreopts)
	if err != nil {
		return nil, err
	}

	reportSvcs := make(map[ServiceType][]EndpointPingReport)
	for svcType, svc := range result.Services {
		st := ServiceType(svcType)

		svcs := make([]EndpointPingReport, len(svc))
		for i, rep := range svc {
			var errStr string
			if rep.Error != nil {
				errStr = rep.Error.Error()
			}
			svcs[i] = EndpointPingReport{
				ID:        rep.ID,
				Remote:    rep.Endpoint,
				State:     PingState(rep.State),
				Error:     errStr,
				Namespace: rep.Scope,
				Latency:   rep.Latency,
			}
		}

		reportSvcs[st] = svcs
	}

	return &PingResult{
		ID:       id,
		sdk:      Identifier() + " " + "gocbcore/" + gocbcore.Version(),
		Services: reportSvcs,
	}, nil
}

View File

@@ -1,314 +0,0 @@
package gocb
import (
"encoding/json"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// jsonQueryMetrics mirrors the "metrics" object of a N1QL query response.
type jsonQueryMetrics struct {
	ElapsedTime   string `json:"elapsedTime"`   // duration string, e.g. "12.3ms"
	ExecutionTime string `json:"executionTime"` // duration string
	ResultCount   uint64 `json:"resultCount"`
	ResultSize    uint64 `json:"resultSize"`
	MutationCount uint64 `json:"mutationCount,omitempty"`
	SortCount     uint64 `json:"sortCount,omitempty"`
	ErrorCount    uint64 `json:"errorCount,omitempty"`
	WarningCount  uint64 `json:"warningCount,omitempty"`
}

// jsonQueryWarning mirrors a single warning entry in a N1QL query response.
type jsonQueryWarning struct {
	Code    uint32 `json:"code"`
	Message string `json:"msg"`
}

// jsonQueryResponse mirrors the meta-data envelope of a N1QL query response.
type jsonQueryResponse struct {
	RequestID       string             `json:"requestID"`
	ClientContextID string             `json:"clientContextID"`
	Status          QueryStatus        `json:"status"`
	Warnings        []jsonQueryWarning `json:"warnings"`
	Metrics         jsonQueryMetrics   `json:"metrics"`
	Profile         interface{}        `json:"profile"`
	Signature       interface{}        `json:"signature"`
	Prepared        string             `json:"prepared"`
}

// QueryMetrics encapsulates various metrics gathered during a queries execution.
type QueryMetrics struct {
	ElapsedTime   time.Duration
	ExecutionTime time.Duration
	ResultCount   uint64
	ResultSize    uint64
	MutationCount uint64
	SortCount     uint64
	ErrorCount    uint64
	WarningCount  uint64
}
// fromData populates the metrics from the decoded server payload.
// Unparseable duration strings are logged and left as zero rather than
// failing the whole query; this function always returns nil.
func (metrics *QueryMetrics) fromData(data jsonQueryMetrics) error {
	elapsedTime, err := time.ParseDuration(data.ElapsedTime)
	if err != nil {
		logDebugf("Failed to parse query metrics elapsed time: %s", err)
	}

	executionTime, err := time.ParseDuration(data.ExecutionTime)
	if err != nil {
		logDebugf("Failed to parse query metrics execution time: %s", err)
	}

	metrics.ElapsedTime = elapsedTime
	metrics.ExecutionTime = executionTime
	metrics.ResultCount = data.ResultCount
	metrics.ResultSize = data.ResultSize
	metrics.MutationCount = data.MutationCount
	metrics.SortCount = data.SortCount
	metrics.ErrorCount = data.ErrorCount
	metrics.WarningCount = data.WarningCount

	return nil
}
// QueryWarning encapsulates any warnings returned by a query.
type QueryWarning struct {
	Code    uint32
	Message string
}

// fromData copies the decoded warning payload into the public type.
// It always returns nil.
func (warning *QueryWarning) fromData(data jsonQueryWarning) error {
	warning.Code = data.Code
	warning.Message = data.Message

	return nil
}
// QueryMetaData provides access to the meta-data properties of a query result.
type QueryMetaData struct {
	RequestID       string
	ClientContextID string
	Status          QueryStatus
	Metrics         QueryMetrics
	Signature       interface{}
	Warnings        []QueryWarning
	Profile         interface{}

	preparedName string // server-assigned name of the prepared statement, if any
}
// fromData populates the meta-data from the decoded response envelope,
// converting the nested metrics and warnings to their public types.
func (meta *QueryMetaData) fromData(data jsonQueryResponse) error {
	metrics := QueryMetrics{}
	if err := metrics.fromData(data.Metrics); err != nil {
		return err
	}

	warnings := make([]QueryWarning, len(data.Warnings))
	for wIdx, jsonWarning := range data.Warnings {
		err := warnings[wIdx].fromData(jsonWarning)
		if err != nil {
			return err
		}
	}

	meta.RequestID = data.RequestID
	meta.ClientContextID = data.ClientContextID
	meta.Status = data.Status
	meta.Metrics = metrics
	meta.Signature = data.Signature
	meta.Warnings = warnings
	meta.Profile = data.Profile
	meta.preparedName = data.Prepared

	return nil
}
// QueryResult allows access to the results of a query.
type QueryResult struct {
	reader queryRowReader

	// rowBytes holds the raw bytes of the row most recently read by Next.
	rowBytes []byte
}

// newQueryResult wraps a row reader in a QueryResult.
func newQueryResult(reader queryRowReader) *QueryResult {
	return &QueryResult{
		reader: reader,
	}
}
// Next advances to the next row in the result stream, returning false once
// the stream is exhausted. The row itself is retrieved via Row.
func (r *QueryResult) Next() bool {
	if row := r.reader.NextRow(); row != nil {
		r.rowBytes = row
		return true
	}
	return false
}
// Row decodes the current row into valuePtr. Passing a *json.RawMessage
// yields the raw bytes without a decode pass. ErrNoResult is returned when
// no row has been read yet.
func (r *QueryResult) Row(valuePtr interface{}) error {
	if r.rowBytes == nil {
		return ErrNoResult
	}

	raw, isRaw := valuePtr.(*json.RawMessage)
	if isRaw {
		*raw = r.rowBytes
		return nil
	}
	return json.Unmarshal(r.rowBytes, valuePtr)
}
// Err returns any errors that have occurred on the stream.
func (r *QueryResult) Err() error {
	return r.reader.Err()
}

// Close marks the results as closed, returning any errors that occurred during reading the results.
func (r *QueryResult) Close() error {
	return r.reader.Close()
}
// One assigns the first value from the results into the value pointer.
// It consumes every remaining row so the stream is fully read; as such it
// should only be used for very small result sets — ideally of length 1.
func (r *QueryResult) One(valuePtr interface{}) error {
	first := r.reader.NextRow()
	if first == nil {
		return ErrNoResult
	}
	// Drain the remainder of the stream so the reader reaches completion.
	for r.reader.NextRow() != nil {
	}
	return json.Unmarshal(first, valuePtr)
}
// MetaData returns any meta-data that was available from this query. Note
// that the meta-data will only be available once the object has been closed
// (either implicitly or explicitly).
func (r *QueryResult) MetaData() (*QueryMetaData, error) {
	raw, err := r.reader.MetaData()
	if err != nil {
		return nil, err
	}

	var resp jsonQueryResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		return nil, err
	}

	meta := &QueryMetaData{}
	if err := meta.fromData(resp); err != nil {
		return nil, err
	}
	return meta, nil
}
// queryRowReader abstracts the streaming row source for a query response,
// allowing the transport implementation to be swapped out.
type queryRowReader interface {
	NextRow() []byte
	Err() error
	MetaData() ([]byte, error)
	Close() error
	// PreparedName reports the server-assigned prepared statement name.
	PreparedName() (string, error)
}
// Query executes the query statement on the server.
// A nil opts is treated the same as a zero-value QueryOptions; opts.Adhoc
// controls whether the prepared-statement path is used (see execN1qlQuery).
func (c *Cluster) Query(statement string, opts *QueryOptions) (*QueryResult, error) {
	if opts == nil {
		opts = &QueryOptions{}
	}

	span := c.tracer.StartSpan("Query", opts.parentSpan).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	// Fall back to the cluster-wide query timeout when none was given.
	timeout := opts.Timeout
	if timeout == 0 {
		timeout = c.timeoutsConfig.QueryTimeout
	}
	deadline := time.Now().Add(timeout)

	retryStrategy := c.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		retryStrategy = newRetryStrategyWrapper(opts.RetryStrategy)
	}

	queryOpts, err := opts.toMap()
	if err != nil {
		return nil, QueryError{
			InnerError:      wrapError(err, "failed to generate query options"),
			Statement:       statement,
			ClientContextID: opts.ClientContextID,
		}
	}

	queryOpts["statement"] = statement

	return c.execN1qlQuery(span, queryOpts, deadline, retryStrategy, opts.Adhoc)
}
// maybeGetQueryOption fetches the named option as a string, yielding the
// empty string when the key is absent or holds a non-string value.
func maybeGetQueryOption(options map[string]interface{}, name string) string {
	value, _ := options[name].(string)
	return value
}
// execN1qlQuery marshals the assembled query options and dispatches the N1QL
// request via the cluster's query provider. When adHoc is true the statement
// is executed directly; otherwise the prepared-statement path is used.
func (c *Cluster) execN1qlQuery(
	span requestSpan,
	options map[string]interface{},
	deadline time.Time,
	retryStrategy *retryStrategyWrapper,
	adHoc bool,
) (*QueryResult, error) {
	provider, err := c.getQueryProvider()
	if err != nil {
		return nil, QueryError{
			InnerError:      wrapError(err, "failed to get query provider"),
			Statement:       maybeGetQueryOption(options, "statement"),
			ClientContextID: maybeGetQueryOption(options, "client_context_id"),
		}
	}

	// The body encoding is traced as its own child span.
	eSpan := c.tracer.StartSpan("request_encoding", span.Context())
	reqBytes, err := json.Marshal(options)
	eSpan.Finish()
	if err != nil {
		return nil, QueryError{
			InnerError:      wrapError(err, "failed to marshall query body"),
			Statement:       maybeGetQueryOption(options, "statement"),
			ClientContextID: maybeGetQueryOption(options, "client_context_id"),
		}
	}

	var res queryRowReader
	var qErr error
	if adHoc {
		res, qErr = provider.N1QLQuery(gocbcore.N1QLQueryOptions{
			Payload:       reqBytes,
			RetryStrategy: retryStrategy,
			Deadline:      deadline,
			TraceContext:  span.Context(),
		})
	} else {
		res, qErr = provider.PreparedN1QLQuery(gocbcore.N1QLQueryOptions{
			Payload:       reqBytes,
			RetryStrategy: retryStrategy,
			Deadline:      deadline,
			TraceContext:  span.Context(),
		})
	}
	if qErr != nil {
		return nil, maybeEnhanceQueryError(qErr)
	}

	return newQueryResult(res), nil
}

View File

@@ -1,558 +0,0 @@
package gocb
import (
"encoding/json"
"errors"
"regexp"
"strings"
"time"
)
// QueryIndexManager provides methods for performing Couchbase query index management.
type QueryIndexManager struct {
	provider queryIndexQueryProvider

	// globalTimeout is applied to operations whose options carry no timeout.
	globalTimeout time.Duration
	tracer        requestTracer
}

// queryIndexQueryProvider abstracts the query execution dependency so the
// manager can be exercised without a live cluster.
type queryIndexQueryProvider interface {
	Query(statement string, opts *QueryOptions) (*QueryResult, error)
}
// Index-management failures are reported by the server as free-form messages;
// these patterns recognise the conditions callers care about. They are
// compiled once at package scope rather than on every error.
var (
	qimIndexNotFoundRE = regexp.MustCompile(`.*?ndex .*? not found.*`)
	qimIndexExistsRE   = regexp.MustCompile(`.*?ndex .*? already exists.*`)
)

// tryParseErrorMessage maps a raw query error onto the typed index-management
// sentinels (ErrIndexNotFound, ErrIndexExists) where possible, preserving the
// original error context; unrecognised errors are returned unchanged.
func (qm *QueryIndexManager) tryParseErrorMessage(err error) error {
	var qErr *QueryError
	if !errors.As(err, &qErr) {
		return err
	}
	if len(qErr.Errors) == 0 {
		return err
	}

	firstErr := qErr.Errors[0]
	var innerErr error
	// The server doesn't return meaningful error codes when it comes to index
	// management so we need to go spelunking through the message text.
	msg := strings.ToLower(firstErr.Message)
	if qimIndexNotFoundRE.MatchString(msg) {
		innerErr = ErrIndexNotFound
	} else if qimIndexExistsRE.MatchString(msg) {
		innerErr = ErrIndexExists
	}

	if innerErr == nil {
		return err
	}

	return QueryError{
		InnerError:      innerErr,
		Statement:       qErr.Statement,
		ClientContextID: qErr.ClientContextID,
		Errors:          qErr.Errors,
		Endpoint:        qErr.Endpoint,
		RetryReasons:    qErr.RetryReasons,
		RetryAttempts:   qErr.RetryAttempts,
	}
}
// doQuery runs a management query and collects every row as raw JSON bytes,
// applying the manager's global timeout when opts carries none. Rows that
// fail to decode are logged and skipped rather than aborting the operation.
func (qm *QueryIndexManager) doQuery(q string, opts *QueryOptions) ([][]byte, error) {
	if opts.Timeout == 0 {
		opts.Timeout = qm.globalTimeout
	}

	result, err := qm.provider.Query(q, opts)
	if err != nil {
		return nil, qm.tryParseErrorMessage(err)
	}

	var rows [][]byte
	for result.Next() {
		var row json.RawMessage
		err := result.Row(&row)
		if err != nil {
			logWarnf("management operation failed to read row: %s", err)
		} else {
			rows = append(rows, row)
		}
	}
	// Surface any stream-level error after iteration completes.
	err = result.Err()
	if err != nil {
		return nil, qm.tryParseErrorMessage(err)
	}

	return rows, nil
}
// jsonQueryIndex is the wire-level shape of a row returned by the
// system:indexes query in getAllIndexes.
type jsonQueryIndex struct {
	Name      string         `json:"name"`
	IsPrimary bool           `json:"is_primary"`
	Type      QueryIndexType `json:"using"`
	State     string         `json:"state"`
	Keyspace  string         `json:"keyspace_id"`
	Namespace string         `json:"namespace_id"`
	IndexKey  []string       `json:"index_key"`
	Condition string         `json:"condition"`
}

// QueryIndex represents a Couchbase GSI index.
type QueryIndex struct {
	Name      string         // index name
	IsPrimary bool           // whether this is a primary index
	Type      QueryIndexType // index type taken from the wire `using` field
	State     string         // server-reported state, e.g. "online", "deferred", "pending"
	Keyspace  string         // keyspace (bucket) the index belongs to
	Namespace string         // namespace the index belongs to
	IndexKey  []string       // indexed field expressions
	Condition string         // partial-index condition, if any
}

// fromData copies the wire representation into the public QueryIndex.
// It never fails; the error return exists to match sibling converters.
func (index *QueryIndex) fromData(data jsonQueryIndex) error {
	index.Name = data.Name
	index.IsPrimary = data.IsPrimary
	index.Type = data.Type
	index.State = data.State
	index.Keyspace = data.Keyspace
	index.Namespace = data.Namespace
	index.IndexKey = data.IndexKey
	index.Condition = data.Condition
	return nil
}
// createQueryIndexOptions carries the internal knobs shared by the public
// index-creation entry points.
type createQueryIndexOptions struct {
	IgnoreIfExists bool
	Deferred       bool
	Timeout        time.Duration
	RetryStrategy  RetryStrategy
}

// createIndex builds and executes a CREATE [PRIMARY] INDEX statement. An empty
// fields list creates a primary index; IgnoreIfExists suppresses ErrIndexExists.
func (qm *QueryIndexManager) createIndex(
	tracectx requestSpanContext,
	bucketName, indexName string,
	fields []string,
	opts createQueryIndexOptions,
) error {
	var qs strings.Builder
	if len(fields) == 0 {
		qs.WriteString("CREATE PRIMARY INDEX")
	} else {
		qs.WriteString("CREATE INDEX")
	}
	if indexName != "" {
		qs.WriteString(" `" + indexName + "`")
	}
	qs.WriteString(" ON `" + bucketName + "`")
	if len(fields) > 0 {
		qs.WriteString(" (")
		for i, field := range fields {
			if i > 0 {
				qs.WriteString(", ")
			}
			qs.WriteString("`" + field + "`")
		}
		qs.WriteString(")")
	}
	if opts.Deferred {
		qs.WriteString(" WITH {\"defer_build\": true}")
	}

	_, err := qm.doQuery(qs.String(), &QueryOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    tracectx,
	})
	if err == nil {
		return nil
	}
	if opts.IgnoreIfExists && errors.Is(err, ErrIndexExists) {
		return nil
	}
	return err
}
// CreateQueryIndexOptions is the set of options available to the query indexes CreateIndex operation.
type CreateQueryIndexOptions struct {
	IgnoreIfExists bool
	Deferred       bool
	Timeout        time.Duration
	RetryStrategy  RetryStrategy
}

// CreateIndex creates an index over the specified fields.
func (qm *QueryIndexManager) CreateIndex(bucketName, indexName string, fields []string, opts *CreateQueryIndexOptions) error {
	if opts == nil {
		opts = &CreateQueryIndexOptions{}
	}

	// A secondary index requires both a name and at least one indexed field.
	if indexName == "" {
		return invalidArgumentsError{
			message: "an invalid index name was specified",
		}
	}
	if len(fields) == 0 {
		return invalidArgumentsError{
			message: "you must specify at least one field to index",
		}
	}

	span := qm.tracer.StartSpan("CreateIndex", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	internalOpts := createQueryIndexOptions{
		IgnoreIfExists: opts.IgnoreIfExists,
		Deferred:       opts.Deferred,
		Timeout:        opts.Timeout,
		RetryStrategy:  opts.RetryStrategy,
	}
	return qm.createIndex(span.Context(), bucketName, indexName, fields, internalOpts)
}
// CreatePrimaryQueryIndexOptions is the set of options available to the query indexes CreatePrimaryIndex operation.
type CreatePrimaryQueryIndexOptions struct {
	IgnoreIfExists bool
	Deferred       bool
	CustomName     string
	Timeout        time.Duration
	RetryStrategy  RetryStrategy
}

// CreatePrimaryIndex creates a primary index. An empty customName uses the default naming.
func (qm *QueryIndexManager) CreatePrimaryIndex(bucketName string, opts *CreatePrimaryQueryIndexOptions) error {
	if opts == nil {
		opts = &CreatePrimaryQueryIndexOptions{}
	}

	span := qm.tracer.StartSpan("CreatePrimaryIndex", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	// A primary index is signalled to createIndex by a nil field list.
	internalOpts := createQueryIndexOptions{
		IgnoreIfExists: opts.IgnoreIfExists,
		Deferred:       opts.Deferred,
		Timeout:        opts.Timeout,
		RetryStrategy:  opts.RetryStrategy,
	}
	return qm.createIndex(span.Context(), bucketName, opts.CustomName, nil, internalOpts)
}
// dropQueryIndexOptions carries the internal knobs shared by the public
// index-drop entry points.
type dropQueryIndexOptions struct {
	IgnoreIfNotExists bool
	Timeout           time.Duration
	RetryStrategy     RetryStrategy
}

// dropIndex builds and executes a DROP [PRIMARY] INDEX statement. An empty
// indexName drops the primary index; IgnoreIfNotExists suppresses ErrIndexNotFound.
func (qm *QueryIndexManager) dropIndex(
	tracectx requestSpanContext,
	bucketName, indexName string,
	opts dropQueryIndexOptions,
) error {
	qs := "DROP PRIMARY INDEX ON `" + bucketName + "`"
	if indexName != "" {
		qs = "DROP INDEX `" + bucketName + "`.`" + indexName + "`"
	}

	_, err := qm.doQuery(qs, &QueryOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		parentSpan:    tracectx,
	})
	if err == nil {
		return nil
	}
	if opts.IgnoreIfNotExists && errors.Is(err, ErrIndexNotFound) {
		return nil
	}
	return err
}
// DropQueryIndexOptions is the set of options available to the query indexes DropIndex operation.
type DropQueryIndexOptions struct {
	IgnoreIfNotExists bool
	Timeout           time.Duration
	RetryStrategy     RetryStrategy
}

// DropIndex drops a specific index by name.
func (qm *QueryIndexManager) DropIndex(bucketName, indexName string, opts *DropQueryIndexOptions) error {
	if opts == nil {
		opts = &DropQueryIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{
			message: "an invalid index name was specified",
		}
	}

	span := qm.tracer.StartSpan("DropIndex", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	return qm.dropIndex(span.Context(), bucketName, indexName, dropQueryIndexOptions{
		IgnoreIfNotExists: opts.IgnoreIfNotExists,
		Timeout:           opts.Timeout,
		RetryStrategy:     opts.RetryStrategy,
	})
}
// DropPrimaryQueryIndexOptions is the set of options available to the query indexes DropPrimaryIndex operation.
type DropPrimaryQueryIndexOptions struct {
	IgnoreIfNotExists bool
	CustomName        string
	Timeout           time.Duration
	RetryStrategy     RetryStrategy
}

// DropPrimaryIndex drops the primary index. Pass an empty customName for unnamed primary indexes.
func (qm *QueryIndexManager) DropPrimaryIndex(bucketName string, opts *DropPrimaryQueryIndexOptions) error {
	if opts == nil {
		opts = &DropPrimaryQueryIndexOptions{}
	}

	span := qm.tracer.StartSpan("DropPrimaryIndex", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	// An empty index name tells dropIndex to target the primary index.
	return qm.dropIndex(span.Context(), bucketName, opts.CustomName, dropQueryIndexOptions{
		IgnoreIfNotExists: opts.IgnoreIfNotExists,
		Timeout:           opts.Timeout,
		RetryStrategy:     opts.RetryStrategy,
	})
}
// GetAllQueryIndexesOptions is the set of options available to the query indexes GetAllIndexes operation.
type GetAllQueryIndexesOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetAllIndexes returns a list of all currently registered indexes.
func (qm *QueryIndexManager) GetAllIndexes(bucketName string, opts *GetAllQueryIndexesOptions) ([]QueryIndex, error) {
	if opts == nil {
		opts = &GetAllQueryIndexesOptions{}
	}

	span := qm.tracer.StartSpan("GetAllIndexes", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	return qm.getAllIndexes(span.Context(), bucketName, opts)
}

// getAllIndexes queries system:indexes for every GSI index in the keyspace and
// converts the raw rows into QueryIndex values.
func (qm *QueryIndexManager) getAllIndexes(
	tracectx requestSpanContext,
	bucketName string,
	opts *GetAllQueryIndexesOptions,
) ([]QueryIndex, error) {
	q := "SELECT `indexes`.* FROM system:indexes WHERE keyspace_id=? AND `using`=\"gsi\""
	rows, err := qm.doQuery(q, &QueryOptions{
		PositionalParameters: []interface{}{bucketName},
		Readonly:             true,
		Timeout:              opts.Timeout,
		RetryStrategy:        opts.RetryStrategy,
		parentSpan:           tracectx,
	})
	if err != nil {
		return nil, err
	}

	var indexes []QueryIndex
	for _, row := range rows {
		var jsonIdx jsonQueryIndex
		if unmarshalErr := json.Unmarshal(row, &jsonIdx); unmarshalErr != nil {
			return nil, unmarshalErr
		}

		var index QueryIndex
		if convErr := index.fromData(jsonIdx); convErr != nil {
			return nil, convErr
		}
		indexes = append(indexes, index)
	}
	return indexes, nil
}
// BuildDeferredQueryIndexOptions is the set of options available to the query indexes BuildDeferredIndexes operation.
type BuildDeferredQueryIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// BuildDeferredIndexes builds all indexes which are currently in deferred state.
// It returns the names of the indexes whose build was initiated; nil means no
// indexes were waiting to be built.
func (qm *QueryIndexManager) BuildDeferredIndexes(bucketName string, opts *BuildDeferredQueryIndexOptions) ([]string, error) {
	if opts == nil {
		opts = &BuildDeferredQueryIndexOptions{}
	}

	span := qm.tracer.StartSpan("BuildDeferredIndexes", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	indexList, err := qm.getAllIndexes(
		span.Context(),
		bucketName,
		&GetAllQueryIndexesOptions{
			Timeout:       opts.Timeout,
			RetryStrategy: opts.RetryStrategy,
		})
	if err != nil {
		return nil, err
	}

	// Collect the names of every index still waiting to be built.
	var deferredList []string
	for _, index := range indexList {
		if index.State == "deferred" || index.State == "pending" {
			deferredList = append(deferredList, index.Name)
		}
	}
	if len(deferredList) == 0 {
		// Don't try to build an empty index list.
		return nil, nil
	}

	var qs strings.Builder
	qs.WriteString("BUILD INDEX ON `" + bucketName + "`(")
	for i, name := range deferredList {
		if i > 0 {
			qs.WriteString(", ")
		}
		qs.WriteString("`" + name + "`")
	}
	qs.WriteString(")")

	_, err = qm.doQuery(qs.String(), &QueryOptions{
		Timeout:       opts.Timeout,
		RetryStrategy: opts.RetryStrategy,
		// Fix: pass the span's context rather than the span itself, matching
		// every other doQuery call site in this file.
		parentSpan: span.Context(),
	})
	if err != nil {
		return nil, err
	}

	return deferredList, nil
}
// checkIndexesActive reports whether every index named in checkList exists in
// indexes and is in the "online" state. It returns ErrIndexNotFound if any
// requested index is missing entirely.
func checkIndexesActive(indexes []QueryIndex, checkList []string) (bool, error) {
	// Index the known indexes by name once so each watched name is an O(1)
	// lookup instead of rescanning the slice (the original was O(n*m)).
	// Only the first occurrence of a duplicated name is kept, matching the
	// original first-match scan.
	byName := make(map[string]*QueryIndex, len(indexes))
	for i := range indexes {
		if _, ok := byName[indexes[i].Name]; !ok {
			byName[indexes[i].Name] = &indexes[i]
		}
	}

	allOnline := true
	for _, name := range checkList {
		idx, ok := byName[name]
		if !ok {
			return false, ErrIndexNotFound
		}
		if idx.State != "online" {
			allOnline = false
		}
	}
	return allOnline, nil
}
// WatchQueryIndexOptions is the set of options available to the query indexes Watch operation.
type WatchQueryIndexOptions struct {
	WatchPrimary  bool
	RetryStrategy RetryStrategy
}

// WatchIndexes waits for a set of indexes to come online, polling until they
// are all "online" or the timeout elapses (ErrUnambiguousTimeout).
func (qm *QueryIndexManager) WatchIndexes(bucketName string, watchList []string, timeout time.Duration, opts *WatchQueryIndexOptions) error {
	if opts == nil {
		opts = &WatchQueryIndexOptions{}
	}

	span := qm.tracer.StartSpan("WatchIndexes", nil).
		SetTag("couchbase.service", "query")
	defer span.Finish()

	if opts.WatchPrimary {
		watchList = append(watchList, "#primary")
	}

	deadline := time.Now().Add(timeout)

	curInterval := 50 * time.Millisecond
	for {
		if deadline.Before(time.Now()) {
			return ErrUnambiguousTimeout
		}

		indexes, err := qm.getAllIndexes(
			span.Context(),
			bucketName,
			&GetAllQueryIndexesOptions{
				Timeout:       time.Until(deadline),
				RetryStrategy: opts.RetryStrategy,
			})
		if err != nil {
			return err
		}

		allOnline, err := checkIndexesActive(indexes, watchList)
		if err != nil {
			return err
		}
		if allOnline {
			break
		}

		// Back off gradually, capping the poll interval at one second.
		// Bug fix: the cap was previously the untyped constant 1000, i.e.
		// 1000 *nanoseconds*, which collapsed the interval to 1µs after the
		// first iteration and produced a tight polling loop.
		curInterval += 500 * time.Millisecond
		if curInterval > 1000*time.Millisecond {
			curInterval = 1000 * time.Millisecond
		}

		// Make sure we don't sleep past our overall deadline; if we adjust the
		// deadline then it will be caught at the top of this loop as a timeout.
		sleepDeadline := time.Now().Add(curInterval)
		if sleepDeadline.After(deadline) {
			sleepDeadline = deadline
		}

		// Wait until our next poll interval.
		time.Sleep(time.Until(sleepDeadline))
	}

	return nil
}

View File

@@ -1,670 +0,0 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"time"
"github.com/pkg/errors"
)
// jsonSearchIndexResp is the wire shape of a single-index management response.
type jsonSearchIndexResp struct {
	Status   string           `json:"status"`
	IndexDef *jsonSearchIndex `json:"indexDef"`
}

// jsonSearchIndexDefs is the wire shape of the index-definitions envelope.
type jsonSearchIndexDefs struct {
	IndexDefs   map[string]jsonSearchIndex `json:"indexDefs"`
	ImplVersion string                     `json:"implVersion"`
}

// jsonSearchIndexesResp is the wire shape of a list-all-indexes response.
type jsonSearchIndexesResp struct {
	Status    string              `json:"status"`
	IndexDefs jsonSearchIndexDefs `json:"indexDefs"`
}

// jsonSearchIndex is the wire representation of a single search index definition.
type jsonSearchIndex struct {
	UUID         string                 `json:"uuid"`
	Name         string                 `json:"name"`
	SourceName   string                 `json:"sourceName"`
	Type         string                 `json:"type"`
	Params       map[string]interface{} `json:"params"`
	SourceUUID   string                 `json:"sourceUUID"`
	SourceParams map[string]interface{} `json:"sourceParams"`
	SourceType   string                 `json:"sourceType"`
	PlanParams   map[string]interface{} `json:"planParams"`
}

// SearchIndex is used to define a search index.
type SearchIndex struct {
	// UUID is required for updates. It provides a means of ensuring consistency, the UUID must match the UUID value
	// for the index on the server.
	UUID string
	// Name represents the name of this index.
	Name string
	// SourceName is the name of the source of the data for the index e.g. bucket name.
	SourceName string
	// Type is the type of index, e.g. fulltext-index or fulltext-alias.
	Type string
	// IndexParams are index properties such as store type and mappings.
	Params map[string]interface{}
	// SourceUUID is the UUID of the data source, this can be used to more tightly tie the index to a source.
	SourceUUID string
	// SourceParams are extra parameters to be defined. These are usually things like advanced connection and tuning
	// parameters.
	SourceParams map[string]interface{}
	// SourceType is the type of the data source, e.g. couchbase or nil depending on the Type field.
	SourceType string
	// PlanParams are plan properties such as number of replicas and number of partitions.
	PlanParams map[string]interface{}
}
// fromData populates the public SearchIndex from its wire representation.
// It never fails; the error return exists to match sibling converters.
func (si *SearchIndex) fromData(data jsonSearchIndex) error {
	*si = SearchIndex{
		UUID:         data.UUID,
		Name:         data.Name,
		SourceName:   data.SourceName,
		Type:         data.Type,
		Params:       data.Params,
		SourceUUID:   data.SourceUUID,
		SourceParams: data.SourceParams,
		SourceType:   data.SourceType,
		PlanParams:   data.PlanParams,
	}
	return nil
}

// toData converts the public SearchIndex into its wire representation.
// It never fails; the error return exists to match sibling converters.
func (si *SearchIndex) toData() (jsonSearchIndex, error) {
	return jsonSearchIndex{
		UUID:         si.UUID,
		Name:         si.Name,
		SourceName:   si.SourceName,
		Type:         si.Type,
		Params:       si.Params,
		SourceUUID:   si.SourceUUID,
		SourceParams: si.SourceParams,
		SourceType:   si.SourceType,
		PlanParams:   si.PlanParams,
	}, nil
}
// SearchIndexManager provides methods for performing Couchbase search index management.
type SearchIndexManager struct {
	mgmtProvider mgmtProvider
	tracer       requestTracer
}

// tryParseErrorMessage sniffs a management response body for well-known error
// text and wraps the match in a typed error (ErrIndexNotFound / ErrIndexExists).
// It returns nil when the body cannot be read, leaving the caller to produce a
// generic status error.
func (sm *SearchIndexManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("Failed to read search index response body: %s", err)
		return nil
	}

	lowered := strings.ToLower(string(b))

	var bodyErr error
	switch {
	case strings.Contains(lowered, "index not found"):
		bodyErr = ErrIndexNotFound
	case strings.Contains(lowered, "index with the same name already exists"):
		bodyErr = ErrIndexExists
	default:
		bodyErr = errors.New(string(b))
	}

	return makeGenericMgmtError(bodyErr, req, resp)
}

// doMgmtRequest forwards a management request to the configured provider,
// normalising the response to nil whenever an error occurred.
func (sm *SearchIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) {
	resp, err := sm.mgmtProvider.executeMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// GetAllSearchIndexOptions is the set of options available to the search indexes GetAllIndexes operation.
type GetAllSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetAllIndexes retrieves all of the search indexes for the cluster.
func (sm *SearchIndexManager) GetAllIndexes(opts *GetAllSearchIndexOptions) ([]SearchIndex, error) {
	if opts == nil {
		opts = &GetAllSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("GetAllIndexes", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "GET",
		Path:          "/api/index",
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		if idxErr := sm.tryParseErrorMessage(&req, resp); idxErr != nil {
			return nil, idxErr
		}
		return nil, makeMgmtBadStatusError("failed to get index", &req, resp)
	}

	var indexesResp jsonSearchIndexesResp
	if decodeErr := json.NewDecoder(resp.Body).Decode(&indexesResp); decodeErr != nil {
		return nil, decodeErr
	}

	var indexes []SearchIndex
	for _, indexData := range indexesResp.IndexDefs.IndexDefs {
		var index SearchIndex
		if convErr := index.fromData(indexData); convErr != nil {
			return nil, convErr
		}
		indexes = append(indexes, index)
	}
	return indexes, nil
}
// GetSearchIndexOptions is the set of options available to the search indexes GetIndex operation.
type GetSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetIndex retrieves a specific search index by name.
func (sm *SearchIndexManager) GetIndex(indexName string, opts *GetSearchIndexOptions) (*SearchIndex, error) {
	if opts == nil {
		opts = &GetSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("GetIndex", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "GET",
		Path:          fmt.Sprintf("/api/index/%s", indexName),
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		idxErr := sm.tryParseErrorMessage(&req, resp)
		if idxErr != nil {
			return nil, idxErr
		}
		return nil, makeMgmtBadStatusError("failed to get index", &req, resp)
	}

	var indexResp jsonSearchIndexResp
	jsonDec := json.NewDecoder(resp.Body)
	err = jsonDec.Decode(&indexResp)
	if err != nil {
		return nil, err
	}

	// Fix: indexDef may be absent from the response body; previously this
	// dereferenced a nil pointer and panicked.
	if indexResp.IndexDef == nil {
		return nil, makeGenericMgmtError(errors.New("response did not contain an index definition"), &req, resp)
	}

	var indexDef SearchIndex
	err = indexDef.fromData(*indexResp.IndexDef)
	if err != nil {
		return nil, err
	}
	return &indexDef, nil
}
// UpsertSearchIndexOptions is the set of options available to the search index manager UpsertIndex operation.
type UpsertSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// UpsertIndex creates or updates a search index.
func (sm *SearchIndexManager) UpsertIndex(indexDefinition SearchIndex, opts *UpsertSearchIndexOptions) error {
	if opts == nil {
		opts = &UpsertSearchIndexOptions{}
	}

	// Both the name and the type are mandatory parts of an index definition.
	if indexDefinition.Name == "" {
		return invalidArgumentsError{"index name cannot be empty"}
	}
	if indexDefinition.Type == "" {
		return invalidArgumentsError{"index type cannot be empty"}
	}

	span := sm.tracer.StartSpan("UpsertIndex", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	indexData, err := indexDefinition.toData()
	if err != nil {
		return err
	}
	b, err := json.Marshal(indexData)
	if err != nil {
		return err
	}

	req := mgmtRequest{
		Service: ServiceTypeSearch,
		Method:  "PUT",
		Path:    fmt.Sprintf("/api/index/%s", indexDefinition.Name),
		Headers: map[string]string{
			"cache-control": "no-cache",
		},
		Body:          b,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		if idxErr := sm.tryParseErrorMessage(&req, resp); idxErr != nil {
			return idxErr
		}
		return makeMgmtBadStatusError("failed to create index", &req, resp)
	}
	return nil
}
// DropSearchIndexOptions is the set of options available to the search index DropIndex operation.
type DropSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// DropIndex removes the search index with the specific name.
func (sm *SearchIndexManager) DropIndex(indexName string, opts *DropSearchIndexOptions) error {
	if opts == nil {
		opts = &DropSearchIndexOptions{}
	}
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("DropIndex", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "DELETE",
		Path:          fmt.Sprintf("/api/index/%s", indexName),
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		// Consistency fix: parse the response body like every sibling method so
		// well-known conditions (e.g. ErrIndexNotFound) surface as typed errors
		// rather than only a generic status error.
		if idxErr := sm.tryParseErrorMessage(&req, resp); idxErr != nil {
			return idxErr
		}
		return makeMgmtBadStatusError("failed to drop the index", &req, resp)
	}
	return nil
}
// AnalyzeDocumentOptions is the set of options available to the search index AnalyzeDocument operation.
type AnalyzeDocumentOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// AnalyzeDocument returns how a doc is analyzed against a specific index.
func (sm *SearchIndexManager) AnalyzeDocument(indexName string, doc interface{}, opts *AnalyzeDocumentOptions) ([]interface{}, error) {
	if opts == nil {
		opts = &AnalyzeDocumentOptions{}
	}
	if indexName == "" {
		return nil, invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("AnalyzeDocument", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	b, err := json.Marshal(doc)
	if err != nil {
		return nil, err
	}

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "POST",
		Path:          fmt.Sprintf("/api/index/%s/analyzeDoc", indexName),
		Body:          b,
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return nil, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		if idxErr := sm.tryParseErrorMessage(&req, resp); idxErr != nil {
			return nil, idxErr
		}
		return nil, makeMgmtBadStatusError("failed to analyze document", &req, resp)
	}

	var analysis struct {
		Status   string        `json:"status"`
		Analyzed []interface{} `json:"analyzed"`
	}
	if decodeErr := json.NewDecoder(resp.Body).Decode(&analysis); decodeErr != nil {
		return nil, decodeErr
	}
	return analysis.Analyzed, nil
}
// GetIndexedDocumentsCountOptions is the set of options available to the search index GetIndexedDocumentsCount operation.
type GetIndexedDocumentsCountOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// GetIndexedDocumentsCount retrieves the document count for a search index.
func (sm *SearchIndexManager) GetIndexedDocumentsCount(indexName string, opts *GetIndexedDocumentsCountOptions) (uint64, error) {
	if opts == nil {
		opts = &GetIndexedDocumentsCountOptions{}
	}
	if indexName == "" {
		return 0, invalidArgumentsError{"indexName cannot be empty"}
	}

	span := sm.tracer.StartSpan("GetIndexedDocumentsCount", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        "GET",
		Path:          fmt.Sprintf("/api/index/%s/count", indexName),
		IsIdempotent:  true,
		RetryStrategy: opts.RetryStrategy,
		Timeout:       opts.Timeout,
		parentSpan:    span.Context(),
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return 0, err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode != 200 {
		if idxErr := sm.tryParseErrorMessage(&req, resp); idxErr != nil {
			return 0, idxErr
		}
		return 0, makeMgmtBadStatusError("failed to get the indexed documents count", &req, resp)
	}

	var count struct {
		Count uint64 `json:"count"`
	}
	if decodeErr := json.NewDecoder(resp.Body).Decode(&count); decodeErr != nil {
		return 0, decodeErr
	}
	return count.Count, nil
}
// performControlRequest issues one of the search index control endpoints
// (ingest, query, or plan-freeze control) and interprets the response,
// surfacing parsed body errors where possible.
func (sm *SearchIndexManager) performControlRequest(
	tracectx requestSpanContext,
	method, uri string,
	timeout time.Duration,
	retryStrategy RetryStrategy,
) error {
	req := mgmtRequest{
		Service:       ServiceTypeSearch,
		Method:        method,
		Path:          uri,
		IsIdempotent:  true,
		Timeout:       timeout,
		RetryStrategy: retryStrategy,
		parentSpan:    tracectx,
	}
	resp, err := sm.doMgmtRequest(req)
	if err != nil {
		return err
	}
	defer ensureBodyClosed(resp.Body)

	if resp.StatusCode == 200 {
		return nil
	}
	if idxErr := sm.tryParseErrorMessage(&req, resp); idxErr != nil {
		return idxErr
	}
	return makeMgmtBadStatusError("failed to perform the control request", &req, resp)
}
// PauseIngestSearchIndexOptions is the set of options available to the search index PauseIngest operation.
type PauseIngestSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// PauseIngest pauses updates and maintenance for an index.
func (sm *SearchIndexManager) PauseIngest(indexName string, opts *PauseIngestSearchIndexOptions) error {
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}
	if opts == nil {
		opts = &PauseIngestSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("PauseIngest", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	uri := fmt.Sprintf("/api/index/%s/ingestControl/pause", indexName)
	return sm.performControlRequest(span.Context(), "POST", uri, opts.Timeout, opts.RetryStrategy)
}

// ResumeIngestSearchIndexOptions is the set of options available to the search index ResumeIngest operation.
type ResumeIngestSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// ResumeIngest resumes updates and maintenance for an index.
func (sm *SearchIndexManager) ResumeIngest(indexName string, opts *ResumeIngestSearchIndexOptions) error {
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}
	if opts == nil {
		opts = &ResumeIngestSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("ResumeIngest", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	uri := fmt.Sprintf("/api/index/%s/ingestControl/resume", indexName)
	return sm.performControlRequest(span.Context(), "POST", uri, opts.Timeout, opts.RetryStrategy)
}

// AllowQueryingSearchIndexOptions is the set of options available to the search index AllowQuerying operation.
type AllowQueryingSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// AllowQuerying allows querying against an index.
func (sm *SearchIndexManager) AllowQuerying(indexName string, opts *AllowQueryingSearchIndexOptions) error {
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}
	if opts == nil {
		opts = &AllowQueryingSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("AllowQuerying", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	uri := fmt.Sprintf("/api/index/%s/queryControl/allow", indexName)
	return sm.performControlRequest(span.Context(), "POST", uri, opts.Timeout, opts.RetryStrategy)
}

// DisallowQueryingSearchIndexOptions is the set of options available to the search index DisallowQuerying operation.
// NOTE(review): this type is declared but unused — DisallowQuerying takes
// *AllowQueryingSearchIndexOptions; the signature is kept for compatibility.
type DisallowQueryingSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// DisallowQuerying disallows querying against an index.
func (sm *SearchIndexManager) DisallowQuerying(indexName string, opts *AllowQueryingSearchIndexOptions) error {
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}
	if opts == nil {
		opts = &AllowQueryingSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("DisallowQuerying", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	uri := fmt.Sprintf("/api/index/%s/queryControl/disallow", indexName)
	return sm.performControlRequest(span.Context(), "POST", uri, opts.Timeout, opts.RetryStrategy)
}

// FreezePlanSearchIndexOptions is the set of options available to the search index FreezePlan operation.
// NOTE(review): this type is declared but unused — FreezePlan takes
// *AllowQueryingSearchIndexOptions; the signature is kept for compatibility.
type FreezePlanSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// FreezePlan freezes the assignment of index partitions to nodes.
func (sm *SearchIndexManager) FreezePlan(indexName string, opts *AllowQueryingSearchIndexOptions) error {
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}
	if opts == nil {
		opts = &AllowQueryingSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("FreezePlan", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	uri := fmt.Sprintf("/api/index/%s/planFreezeControl/freeze", indexName)
	return sm.performControlRequest(span.Context(), "POST", uri, opts.Timeout, opts.RetryStrategy)
}

// UnfreezePlanSearchIndexOptions is the set of options available to the search index UnfreezePlan operation.
// NOTE(review): this type is declared but unused — UnfreezePlan takes
// *AllowQueryingSearchIndexOptions; the signature is kept for compatibility.
type UnfreezePlanSearchIndexOptions struct {
	Timeout       time.Duration
	RetryStrategy RetryStrategy
}

// UnfreezePlan unfreezes the assignment of index partitions to nodes.
func (sm *SearchIndexManager) UnfreezePlan(indexName string, opts *AllowQueryingSearchIndexOptions) error {
	if indexName == "" {
		return invalidArgumentsError{"indexName cannot be empty"}
	}
	if opts == nil {
		opts = &AllowQueryingSearchIndexOptions{}
	}

	span := sm.tracer.StartSpan("UnfreezePlan", nil).
		SetTag("couchbase.service", "search")
	defer span.Finish()

	uri := fmt.Sprintf("/api/index/%s/planFreezeControl/unfreeze", indexName)
	return sm.performControlRequest(span.Context(), "POST", uri, opts.Timeout, opts.RetryStrategy)
}

View File

@@ -1,342 +0,0 @@
package gocb
import (
"encoding/json"
"time"
cbsearch "github.com/couchbase/gocb/v2/search"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// jsonRowLocation is the wire-level shape of a single term location in a hit.
type jsonRowLocation struct {
	Field          string   `json:"field"`
	Term           string   `json:"term"`
	Position       uint32   `json:"position"`
	Start          uint32   `json:"start"`
	End            uint32   `json:"end"`
	ArrayPositions []uint32 `json:"array_positions"`
}

// jsonSearchFacet is the wire-level shape of one facet result.
type jsonSearchFacet struct {
	Name    string `json:"name"`
	Field   string `json:"field"`
	Total   uint64 `json:"total"`
	Missing uint64 `json:"missing"`
	Other   uint64 `json:"other"`
}

// jsonSearchRowLocations maps field name -> term -> locations on the wire.
type jsonSearchRowLocations map[string]map[string][]jsonRowLocation

// jsonSearchRow is the wire-level shape of a single search hit.
type jsonSearchRow struct {
	Index       string                 `json:"index"`
	ID          string                 `json:"id"`
	Score       float64                `json:"score"`
	Explanation interface{}            `json:"explanation"`
	Locations   jsonSearchRowLocations `json:"locations"`
	Fragments   map[string][]string    `json:"fragments"`
	Fields      json.RawMessage        `json:"fields"`
}

// jsonSearchResponse is the wire-level shape of the query's meta-data envelope.
type jsonSearchResponse struct {
	Errors    map[string]string          `json:"errors"`
	TotalHits uint64                     `json:"total_hits"`
	MaxScore  float64                    `json:"max_score"`
	Took      uint64                     `json:"took"`
	Facets    map[string]jsonSearchFacet `json:"facets"`
}
// SearchMetrics encapsulates various metrics gathered during a search queries execution.
type SearchMetrics struct {
	Took                  time.Duration
	TotalRows             uint64
	MaxScore              float64
	TotalPartitionCount   uint64 // NOTE(review): not populated by fromData in this file — confirm where it is set
	SuccessPartitionCount uint64 // NOTE(review): not populated by fromData in this file
	ErrorPartitionCount   uint64 // NOTE(review): not populated by fromData in this file
}

// fromData copies the wire-level metrics into the public SearchMetrics.
// It never fails; the error return matches the sibling converters.
func (metrics *SearchMetrics) fromData(data jsonSearchResponse) error {
	metrics.TotalRows = data.TotalHits
	metrics.MaxScore = data.MaxScore
	// The raw `took` value is interpreted as microseconds here.
	// NOTE(review): confirm the unit the search service actually reports.
	metrics.Took = time.Duration(data.Took) * time.Microsecond
	return nil
}

// SearchMetaData provides access to the meta-data properties of a search query result.
type SearchMetaData struct {
	Metrics SearchMetrics
	Errors  map[string]string
}

// fromData populates the meta-data from the wire response, copying the
// server-reported Errors map verbatim.
func (meta *SearchMetaData) fromData(data jsonSearchResponse) error {
	metrics := SearchMetrics{}
	if err := metrics.fromData(data); err != nil {
		return err
	}
	meta.Metrics = metrics
	meta.Errors = data.Errors
	return nil
}
// SearchFacetResult provides access to the result of a faceted query.
type SearchFacetResult struct {
	Name    string
	Field   string
	Total   uint64
	Missing uint64
	Other   uint64
}

// fromData copies a wire-level facet into the public SearchFacetResult.
// It never fails; the error return matches the sibling converters.
func (fr *SearchFacetResult) fromData(data jsonSearchFacet) error {
	*fr = SearchFacetResult{
		Name:    data.Name,
		Field:   data.Field,
		Total:   data.Total,
		Missing: data.Missing,
		Other:   data.Other,
	}
	return nil
}

// SearchRowLocation represents the location of a row match
type SearchRowLocation struct {
	Position       uint32
	Start          uint32
	End            uint32
	ArrayPositions []uint32
}

// fromData copies a wire-level location into the public SearchRowLocation.
// It never fails; the error return matches the sibling converters.
func (rl *SearchRowLocation) fromData(data jsonRowLocation) error {
	*rl = SearchRowLocation{
		Position:       data.Position,
		Start:          data.Start,
		End:            data.End,
		ArrayPositions: data.ArrayPositions,
	}
	return nil
}
// SearchRow represents a single hit returned from a search query.
type SearchRow struct {
	Index       string
	ID          string
	Score       float64
	Explanation interface{}
	Locations   map[string]map[string][]SearchRowLocation
	Fragments   map[string][]string
	// fieldsBytes holds the raw JSON of the row's fields; decoded lazily by Fields.
	fieldsBytes []byte
}

// Fields decodes the fields included in a search hit.
// valuePtr must be a pointer suitable for passing to json.Unmarshal.
func (sr *SearchRow) Fields(valuePtr interface{}) error {
	return json.Unmarshal(sr.fieldsBytes, valuePtr)
}

// searchRowReader abstracts the streaming transport beneath a SearchResult:
// rows are pulled one at a time; MetaData is read from the same stream.
type searchRowReader interface {
	NextRow() []byte
	Err() error
	MetaData() ([]byte, error)
	Close() error
}
// SearchResult allows access to the results of a search query.
type SearchResult struct {
	reader     searchRowReader // underlying row stream
	currentRow SearchRow       // row populated by the most recent call to Next
}

// newSearchResult wraps a row reader in a SearchResult iterator.
func newSearchResult(reader searchRowReader) *SearchResult {
	return &SearchResult{
		reader: reader,
	}
}
// Next advances to the next hit in the stream, returning false once the stream
// is exhausted. The decoded hit is then available via Row; a hit whose JSON
// cannot be decoded still counts as read but carries only zero values.
func (r *SearchResult) Next() bool {
	rowBytes := r.reader.NextRow()
	if rowBytes == nil {
		return false
	}

	r.currentRow = SearchRow{}

	var rowData jsonSearchRow
	if err := json.Unmarshal(rowBytes, &rowData); err != nil {
		return true
	}

	r.currentRow.Index = rowData.Index
	r.currentRow.ID = rowData.ID
	r.currentRow.Score = rowData.Score
	r.currentRow.Explanation = rowData.Explanation
	r.currentRow.Fragments = rowData.Fragments
	r.currentRow.fieldsBytes = rowData.Fields

	// Convert the nested field -> term -> location structure.
	fieldLocations := make(map[string]map[string][]SearchRowLocation)
	for fieldName, fieldData := range rowData.Locations {
		terms := make(map[string][]SearchRowLocation)
		for termName, termData := range fieldData {
			locs := make([]SearchRowLocation, len(termData))
			for locIdx, locData := range termData {
				if err := locs[locIdx].fromData(locData); err != nil {
					logWarnf("failed to parse search query location data: %s", err)
				}
			}
			terms[termName] = locs
		}
		fieldLocations[fieldName] = terms
	}
	r.currentRow.Locations = fieldLocations

	return true
}
// Row returns the contents of the current row.
// Only valid after a call to Next that returned true.
func (r *SearchResult) Row() SearchRow {
return r.currentRow
}
// Err returns any errors that have occurred on the stream
func (r *SearchResult) Err() error {
return r.reader.Err()
}
// Close marks the results as closed, returning any errors that occurred during reading the results.
func (r *SearchResult) Close() error {
return r.reader.Close()
}
// getJSONResp fetches the raw response meta-data from the reader and decodes
// it into a jsonSearchResponse. Meta-data is only available once the stream
// has been fully read or closed.
func (r *SearchResult) getJSONResp() (jsonSearchResponse, error) {
	var jsonResp jsonSearchResponse

	metaDataBytes, err := r.reader.MetaData()
	if err != nil {
		return jsonResp, err
	}

	if err := json.Unmarshal(metaDataBytes, &jsonResp); err != nil {
		return jsonSearchResponse{}, err
	}

	return jsonResp, nil
}
// MetaData returns any meta-data that was available from this query. Note that
// the meta-data will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *SearchResult) MetaData() (*SearchMetaData, error) {
	jsonResp, err := r.getJSONResp()
	if err != nil {
		return nil, err
	}

	metaData := &SearchMetaData{}
	if err := metaData.fromData(jsonResp); err != nil {
		return nil, err
	}

	return metaData, nil
}
// Facets returns any facets that were returned with this query. Note that the
// facets will only be available once the object has been closed (either
// implicitly or explicitly).
func (r *SearchResult) Facets() (map[string]SearchFacetResult, error) {
	jsonResp, err := r.getJSONResp()
	if err != nil {
		return nil, err
	}

	facets := make(map[string]SearchFacetResult, len(jsonResp.Facets))
	for name, data := range jsonResp.Facets {
		var result SearchFacetResult
		if err := result.fromData(data); err != nil {
			return nil, err
		}
		facets[name] = result
	}

	return facets, nil
}
// SearchQuery executes the analytics query statement on the server.
// NOTE(review): the doc comment says "analytics" but this executes a search
// (FTS) query against indexName — likely a copy-paste from the analytics file.
func (c *Cluster) SearchQuery(indexName string, query cbsearch.Query, opts *SearchOptions) (*SearchResult, error) {
if opts == nil {
opts = &SearchOptions{}
}
span := c.tracer.StartSpan("SearchQuery", opts.parentSpan).
SetTag("couchbase.service", "search")
defer span.Finish()
// Fall back to the cluster-level search timeout when none was supplied.
timeout := opts.Timeout
if timeout == 0 {
timeout = c.timeoutsConfig.SearchTimeout
}
deadline := time.Now().Add(timeout)
retryStrategy := c.retryStrategyWrapper
if opts.RetryStrategy != nil {
retryStrategy = newRetryStrategyWrapper(opts.RetryStrategy)
}
searchOpts, err := opts.toMap()
if err != nil {
return nil, SearchError{
InnerError: wrapError(err, "failed to generate query options"),
Query: query,
}
}
// The query itself rides along in the options map sent to the server.
searchOpts["query"] = query
return c.execSearchQuery(span, indexName, searchOpts, deadline, retryStrategy)
}
// maybeGetSearchOptionQuery extracts the "query" entry from a search options
// map for inclusion in error values, returning an empty string when absent.
func maybeGetSearchOptionQuery(options map[string]interface{}) interface{} {
	value, ok := options["query"]
	if !ok {
		return ""
	}
	return value
}
// execSearchQuery marshals the prepared options map and dispatches the search
// request to the core search provider, wrapping failures in SearchError with
// the originating query attached for context.
func (c *Cluster) execSearchQuery(
span requestSpan,
indexName string,
options map[string]interface{},
deadline time.Time,
retryStrategy *retryStrategyWrapper,
) (*SearchResult, error) {
provider, err := c.getSearchProvider()
if err != nil {
return nil, SearchError{
InnerError: wrapError(err, "failed to get query provider"),
Query: maybeGetSearchOptionQuery(options),
}
}
reqBytes, err := json.Marshal(options)
if err != nil {
return nil, SearchError{
InnerError: wrapError(err, "failed to marshall query body"),
Query: maybeGetSearchOptionQuery(options),
}
}
res, err := provider.SearchQuery(gocbcore.SearchQueryOptions{
IndexName: indexName,
Payload: reqBytes,
RetryStrategy: retryStrategy,
Deadline: deadline,
TraceContext: span.Context(),
})
if err != nil {
// Translate core errors into their enhanced public equivalents.
return nil, maybeEnhanceSearchError(err)
}
return newSearchResult(res), nil
}

View File

@@ -1,792 +0,0 @@
package gocb
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"time"
"github.com/google/uuid"
"github.com/pkg/errors"
)
// AuthDomain specifies the user domain of a specific user
type AuthDomain string
const (
// LocalDomain specifies users that are locally stored in Couchbase.
LocalDomain AuthDomain = "local"
// ExternalDomain specifies users that are externally stored
// (in LDAP for instance).
ExternalDomain AuthDomain = "external"
)
// jsonOrigin is the wire representation of a role origin.
type jsonOrigin struct {
Type string `json:"type"`
Name string `json:"name"`
}
// jsonRole is the wire representation of a role assignment.
type jsonRole struct {
RoleName string `json:"role"`
BucketName string `json:"bucket_name"`
}
// jsonRoleDescription is the wire representation of a role plus its
// human-readable name and description.
type jsonRoleDescription struct {
jsonRole
Name string `json:"name"`
Description string `json:"desc"`
}
// jsonRoleOrigins is the wire representation of a role together with the
// origins (user/group) through which it was granted.
type jsonRoleOrigins struct {
jsonRole
Origins []jsonOrigin
}
// jsonUserMetadata is the wire representation of a user record as returned
// by the RBAC management endpoints.
type jsonUserMetadata struct {
ID string `json:"id"`
Name string `json:"name"`
Roles []jsonRoleOrigins `json:"roles"`
Groups []string `json:"groups"`
Domain AuthDomain `json:"domain"`
ExternalGroups []string `json:"external_groups"`
PasswordChanged time.Time `json:"password_change_date"`
}
// jsonGroup is the wire representation of a user group.
type jsonGroup struct {
Name string `json:"id"`
Description string `json:"description"`
Roles []jsonRole `json:"roles"`
LDAPGroupReference string `json:"ldap_group_ref"`
}
// Role represents a specific permission.
type Role struct {
Name string `json:"role"`
// Bucket is empty for roles that are not scoped to a bucket.
Bucket string `json:"bucket_name"`
}
// fromData copies the wire-format role into the public Role.
func (ro *Role) fromData(data jsonRole) error {
ro.Name = data.RoleName
ro.Bucket = data.BucketName
return nil
}
// RoleAndDescription represents a role with its display name and description.
type RoleAndDescription struct {
Role
DisplayName string
Description string
}
// fromData populates the role, display name and description from the
// wire-format role description.
func (rd *RoleAndDescription) fromData(data jsonRoleDescription) error {
err := rd.Role.fromData(data.jsonRole)
if err != nil {
return err
}
rd.DisplayName = data.Name
rd.Description = data.Description
return nil
}
// Origin indicates why a user has a specific role. Is the Origin Type is "user" then the role is assigned
// directly to the user. If the type is "group" then it means that the role has been inherited from the group
// identified by the Name field.
type Origin struct {
Type string
Name string
}
// fromData copies the wire-format origin into the public Origin.
func (o *Origin) fromData(data jsonOrigin) error {
o.Type = data.Type
o.Name = data.Name
return nil
}
// RoleAndOrigins associates a role with its origins.
type RoleAndOrigins struct {
	Role
	Origins []Origin
}

// fromData populates the role and its origins from the wire-format payload.
//
// BUG FIX: the origins slice was previously created with
// make([]Origin, len(data.Origins)) and then appended to, which produced a
// slice of twice the expected length whose first half was zero-value Origins.
// Allocate with zero length and the correct capacity instead (compare the
// index-based fill used by Group.fromData, which was already correct).
func (ro *RoleAndOrigins) fromData(data jsonRoleOrigins) error {
	err := ro.Role.fromData(data.jsonRole)
	if err != nil {
		return err
	}

	origins := make([]Origin, 0, len(data.Origins))
	for _, originData := range data.Origins {
		var origin Origin
		if err := origin.fromData(originData); err != nil {
			return err
		}
		origins = append(origins, origin)
	}
	ro.Origins = origins

	return nil
}
// User represents a user which was retrieved from the server.
type User struct {
Username string
DisplayName string
// Roles are the roles assigned to the user that are of type "user".
Roles []Role
Groups []string
// Password is write-only: it is sent on upsert but never returned by the server.
Password string
}
// UserAndMetadata represents a user and user meta-data from the server.
type UserAndMetadata struct {
User
Domain AuthDomain
// EffectiveRoles are all of the user's roles and the origins.
EffectiveRoles []RoleAndOrigins
ExternalGroups []string
PasswordChanged time.Time
}
// fromData populates the user and meta-data from the wire-format payload.
// Every role becomes an EffectiveRole; a role additionally counts as a
// directly-assigned User.Role when the payload carries no origin data at all,
// or when at least one of its origins is of type "user".
func (um *UserAndMetadata) fromData(data jsonUserMetadata) error {
um.User.Username = data.ID
um.User.DisplayName = data.Name
um.User.Groups = data.Groups
um.ExternalGroups = data.ExternalGroups
um.Domain = data.Domain
um.PasswordChanged = data.PasswordChanged
var roles []Role
var effectiveRoles []RoleAndOrigins
for _, roleData := range data.Roles {
var effectiveRole RoleAndOrigins
err := effectiveRole.fromData(roleData)
if err != nil {
return err
}
effectiveRoles = append(effectiveRoles, effectiveRole)
role := effectiveRole.Role
if roleData.Origins == nil {
// No origin information: treat as directly assigned.
roles = append(roles, role)
} else {
for _, origin := range effectiveRole.Origins {
if origin.Type == "user" {
roles = append(roles, role)
break
}
}
}
}
um.EffectiveRoles = effectiveRoles
um.User.Roles = roles
return nil
}
// Group represents a user group on the server.
type Group struct {
Name string
Description string
Roles []Role
// LDAPGroupReference, when set, links this group to an LDAP group.
LDAPGroupReference string
}
// fromData populates the group from the wire-format payload, converting each
// wire role into a public Role in place.
func (g *Group) fromData(data jsonGroup) error {
g.Name = data.Name
g.Description = data.Description
g.LDAPGroupReference = data.LDAPGroupReference
roles := make([]Role, len(data.Roles))
for roleIdx, roleData := range data.Roles {
err := roles[roleIdx].fromData(roleData)
if err != nil {
return err
}
}
g.Roles = roles
return nil
}
// UserManager provides methods for performing Couchbase user management.
type UserManager struct {
// provider executes HTTP requests against the management service.
provider mgmtProvider
tracer requestTracer
}
// tryParseErrorMessage attempts to turn a non-2xx management response body
// into a typed error, mapping known 404 messages onto ErrUserNotFound or
// ErrGroupNotFound and falling back to the raw body text otherwise. A body
// read failure is logged and reported as nil so the caller can fall back to
// a generic bad-status error.
func (um *UserManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		logDebugf("Failed to read search index response body: %s", err)
		return nil
	}

	body := strings.ToLower(string(b))

	var bodyErr error
	switch {
	case resp.StatusCode != 404:
		bodyErr = errors.New(string(b))
	case strings.Contains(body, "unknown user"),
		strings.Contains(body, "user was not found"):
		bodyErr = ErrUserNotFound
	case strings.Contains(body, "group was not found"),
		strings.Contains(body, "unknown group"):
		bodyErr = ErrGroupNotFound
	default:
		bodyErr = errors.New(string(b))
	}

	return makeGenericMgmtError(bodyErr, req, resp)
}
// GetAllUsersOptions is the set of options available to the user manager GetAll operation.
type GetAllUsersOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
// DomainName is the user domain to list; defaults to the local domain.
DomainName string
}
// GetAllUsers returns a list of all the users from the cluster.
func (um *UserManager) GetAllUsers(opts *GetAllUsersOptions) ([]UserAndMetadata, error) {
if opts == nil {
opts = &GetAllUsersOptions{}
}
span := um.tracer.StartSpan("GetAllUsers", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
if opts.DomainName == "" {
opts.DomainName = string(LocalDomain)
}
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "GET",
Path: fmt.Sprintf("/settings/rbac/users/%s", opts.DomainName),
IsIdempotent: true,
RetryStrategy: opts.RetryStrategy,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return nil, makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
// Non-2xx: prefer a server-provided error message when one can be parsed.
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return nil, usrErr
}
return nil, makeMgmtBadStatusError("failed to get users", &req, resp)
}
var usersData []jsonUserMetadata
jsonDec := json.NewDecoder(resp.Body)
err = jsonDec.Decode(&usersData)
if err != nil {
return nil, err
}
users := make([]UserAndMetadata, len(usersData))
for userIdx, userData := range usersData {
err := users[userIdx].fromData(userData)
if err != nil {
return nil, err
}
}
return users, nil
}
// GetUserOptions is the set of options available to the user manager Get operation.
type GetUserOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
// DomainName is the user domain to look in; defaults to the local domain.
DomainName string
}
// GetUser returns the data for a particular user
func (um *UserManager) GetUser(name string, opts *GetUserOptions) (*UserAndMetadata, error) {
if opts == nil {
opts = &GetUserOptions{}
}
span := um.tracer.StartSpan("GetUser", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
if opts.DomainName == "" {
opts.DomainName = string(LocalDomain)
}
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "GET",
Path: fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, name),
IsIdempotent: true,
RetryStrategy: opts.RetryStrategy,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return nil, makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
// Non-2xx: prefer a server-provided error (e.g. ErrUserNotFound on 404).
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return nil, usrErr
}
return nil, makeMgmtBadStatusError("failed to get user", &req, resp)
}
var userData jsonUserMetadata
jsonDec := json.NewDecoder(resp.Body)
err = jsonDec.Decode(&userData)
if err != nil {
return nil, err
}
var user UserAndMetadata
err = user.fromData(userData)
if err != nil {
return nil, err
}
return &user, nil
}
// UpsertUserOptions is the set of options available to the user manager Upsert operation.
type UpsertUserOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
// DomainName is the user domain to write to; defaults to the local domain.
DomainName string
}
// UpsertUser updates a built-in RBAC user on the cluster.
func (um *UserManager) UpsertUser(user User, opts *UpsertUserOptions) error {
if opts == nil {
opts = &UpsertUserOptions{}
}
span := um.tracer.StartSpan("UpsertUser", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
if opts.DomainName == "" {
opts.DomainName = string(LocalDomain)
}
// Roles are encoded as "name" or "name[bucket]" for bucket-scoped roles.
var reqRoleStrs []string
for _, roleData := range user.Roles {
if roleData.Bucket == "" {
reqRoleStrs = append(reqRoleStrs, roleData.Name)
} else {
reqRoleStrs = append(reqRoleStrs, fmt.Sprintf("%s[%s]", roleData.Name, roleData.Bucket))
}
}
reqForm := make(url.Values)
reqForm.Add("name", user.DisplayName)
// Omitting the password field leaves the existing password unchanged.
if user.Password != "" {
reqForm.Add("password", user.Password)
}
if len(user.Groups) > 0 {
reqForm.Add("groups", strings.Join(user.Groups, ","))
}
reqForm.Add("roles", strings.Join(reqRoleStrs, ","))
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "PUT",
Path: fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, user.Username),
Body: []byte(reqForm.Encode()),
ContentType: "application/x-www-form-urlencoded",
RetryStrategy: opts.RetryStrategy,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return usrErr
}
return makeMgmtBadStatusError("failed to upsert user", &req, resp)
}
return nil
}
// DropUserOptions is the set of options available to the user manager Drop operation.
type DropUserOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
// DomainName is the user domain to delete from; defaults to the local domain.
DomainName string
}
// DropUser removes a built-in RBAC user on the cluster.
func (um *UserManager) DropUser(name string, opts *DropUserOptions) error {
if opts == nil {
opts = &DropUserOptions{}
}
span := um.tracer.StartSpan("DropUser", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
if opts.DomainName == "" {
opts.DomainName = string(LocalDomain)
}
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "DELETE",
Path: fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, name),
RetryStrategy: opts.RetryStrategy,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return usrErr
}
return makeMgmtBadStatusError("failed to drop user", &req, resp)
}
return nil
}
// GetRolesOptions is the set of options available to the user manager GetRoles operation.
type GetRolesOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
}
// GetRoles lists the roles supported by the cluster.
func (um *UserManager) GetRoles(opts *GetRolesOptions) ([]RoleAndDescription, error) {
if opts == nil {
opts = &GetRolesOptions{}
}
span := um.tracer.StartSpan("GetRoles", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "GET",
Path: "/settings/rbac/roles",
RetryStrategy: opts.RetryStrategy,
IsIdempotent: true,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return nil, makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return nil, usrErr
}
return nil, makeMgmtBadStatusError("failed to get roles", &req, resp)
}
var roleDatas []jsonRoleDescription
jsonDec := json.NewDecoder(resp.Body)
err = jsonDec.Decode(&roleDatas)
if err != nil {
return nil, err
}
roles := make([]RoleAndDescription, len(roleDatas))
for roleIdx, roleData := range roleDatas {
err := roles[roleIdx].fromData(roleData)
if err != nil {
return nil, err
}
}
return roles, nil
}
// GetGroupOptions is the set of options available to the group manager Get operation.
type GetGroupOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
}
// GetGroup fetches a single group from the server.
func (um *UserManager) GetGroup(groupName string, opts *GetGroupOptions) (*Group, error) {
if groupName == "" {
return nil, makeInvalidArgumentsError("groupName cannot be empty")
}
if opts == nil {
opts = &GetGroupOptions{}
}
span := um.tracer.StartSpan("GetGroup", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "GET",
Path: fmt.Sprintf("/settings/rbac/groups/%s", groupName),
RetryStrategy: opts.RetryStrategy,
IsIdempotent: true,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return nil, makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
// Non-2xx: prefer a server-provided error (e.g. ErrGroupNotFound on 404).
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return nil, usrErr
}
return nil, makeMgmtBadStatusError("failed to get group", &req, resp)
}
var groupData jsonGroup
jsonDec := json.NewDecoder(resp.Body)
err = jsonDec.Decode(&groupData)
if err != nil {
return nil, err
}
var group Group
err = group.fromData(groupData)
if err != nil {
return nil, err
}
return &group, nil
}
// GetAllGroupsOptions is the set of options available to the group manager GetAll operation.
type GetAllGroupsOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
}
// GetAllGroups fetches all groups from the server.
func (um *UserManager) GetAllGroups(opts *GetAllGroupsOptions) ([]Group, error) {
if opts == nil {
opts = &GetAllGroupsOptions{}
}
span := um.tracer.StartSpan("GetAllGroups", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "GET",
Path: "/settings/rbac/groups",
RetryStrategy: opts.RetryStrategy,
IsIdempotent: true,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return nil, makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return nil, usrErr
}
return nil, makeMgmtBadStatusError("failed to get all groups", &req, resp)
}
var groupDatas []jsonGroup
jsonDec := json.NewDecoder(resp.Body)
err = jsonDec.Decode(&groupDatas)
if err != nil {
return nil, err
}
groups := make([]Group, len(groupDatas))
for groupIdx, groupData := range groupDatas {
err = groups[groupIdx].fromData(groupData)
if err != nil {
return nil, err
}
}
return groups, nil
}
// UpsertGroupOptions is the set of options available to the group manager Upsert operation.
type UpsertGroupOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
}
// UpsertGroup creates, or updates, a group on the server.
func (um *UserManager) UpsertGroup(group Group, opts *UpsertGroupOptions) error {
if group.Name == "" {
return makeInvalidArgumentsError("group name cannot be empty")
}
if opts == nil {
opts = &UpsertGroupOptions{}
}
span := um.tracer.StartSpan("UpsertGroup", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
// Roles are encoded as "name" or "name[bucket]" for bucket-scoped roles.
var reqRoleStrs []string
for _, roleData := range group.Roles {
if roleData.Bucket == "" {
reqRoleStrs = append(reqRoleStrs, roleData.Name)
} else {
reqRoleStrs = append(reqRoleStrs, fmt.Sprintf("%s[%s]", roleData.Name, roleData.Bucket))
}
}
reqForm := make(url.Values)
reqForm.Add("description", group.Description)
reqForm.Add("ldap_group_ref", group.LDAPGroupReference)
reqForm.Add("roles", strings.Join(reqRoleStrs, ","))
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "PUT",
Path: fmt.Sprintf("/settings/rbac/groups/%s", group.Name),
Body: []byte(reqForm.Encode()),
ContentType: "application/x-www-form-urlencoded",
RetryStrategy: opts.RetryStrategy,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return usrErr
}
return makeMgmtBadStatusError("failed to upsert group", &req, resp)
}
return nil
}
// DropGroupOptions is the set of options available to the group manager Drop operation.
type DropGroupOptions struct {
Timeout time.Duration
RetryStrategy RetryStrategy
}
// DropGroup removes a group from the server.
func (um *UserManager) DropGroup(groupName string, opts *DropGroupOptions) error {
if groupName == "" {
return makeInvalidArgumentsError("groupName cannot be empty")
}
if opts == nil {
opts = &DropGroupOptions{}
}
span := um.tracer.StartSpan("DropGroup", nil).
SetTag("couchbase.service", "mgmt")
defer span.Finish()
req := mgmtRequest{
Service: ServiceTypeManagement,
Method: "DELETE",
Path: fmt.Sprintf("/settings/rbac/groups/%s", groupName),
RetryStrategy: opts.RetryStrategy,
UniqueID: uuid.New().String(),
Timeout: opts.Timeout,
parentSpan: span.Context(),
}
resp, err := um.provider.executeMgmtRequest(req)
if err != nil {
return makeGenericMgmtError(err, &req, resp)
}
defer ensureBodyClosed(resp.Body)
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
usrErr := um.tryParseErrorMessage(&req, resp)
if usrErr != nil {
return usrErr
}
return makeMgmtBadStatusError("failed to drop group", &req, resp)
}
return nil
}

View File

@@ -1,75 +0,0 @@
package gocb
import "time"
// kvTimeoutsConfig carries the KV operation timeouts inherited from the
// cluster/bucket configuration.
type kvTimeoutsConfig struct {
KVTimeout time.Duration
KVDurableTimeout time.Duration
}
// Collection represents a single collection.
type Collection struct {
collectionName string
// scope is the name of the owning scope (not the Scope object itself).
scope string
bucket *Bucket
timeoutsConfig kvTimeoutsConfig
transcoder Transcoder
retryStrategyWrapper *retryStrategyWrapper
tracer requestTracer
useMutationTokens bool
// getKvProvider lazily resolves the underlying KV agent for this collection.
getKvProvider func() (kvProvider, error)
}
// newCollection creates a Collection within the given scope, inheriting all
// of the scope's configuration (timeouts, transcoder, retry strategy,
// tracing, mutation-token setting and KV provider accessor).
func newCollection(scope *Scope, collectionName string) *Collection {
	c := &Collection{
		collectionName: collectionName,
		scope:          scope.Name(),
		bucket:         scope.bucket,

		timeoutsConfig: scope.timeoutsConfig,
		transcoder:     scope.transcoder,

		retryStrategyWrapper: scope.retryStrategyWrapper,
		tracer:               scope.tracer,
		useMutationTokens:    scope.useMutationTokens,

		getKvProvider: scope.getKvProvider,
	}
	return c
}
// name returns the collection's name (unexported convenience accessor).
func (c *Collection) name() string {
return c.collectionName
}
// ScopeName returns the name of the scope to which this collection belongs.
// UNCOMMITTED: This API may change in the future.
func (c *Collection) ScopeName() string {
return c.scope
}
// Bucket returns the name of the bucket to which this collection belongs.
// UNCOMMITTED: This API may change in the future.
// NOTE(review): despite the comment, this returns the *Bucket itself, not its name.
func (c *Collection) Bucket() *Bucket {
return c.bucket
}
// Name returns the name of the collection.
func (c *Collection) Name() string {
return c.collectionName
}
// startKvOpTrace starts a tracing span for a KV operation, tagging it with
// the bucket name, collection name and service type.
// FIX: the "couchbase.bucket" tag previously received the *Bucket struct
// itself rather than its name; every sibling tag here is a string, so tag
// with the bucket's name instead.
func (c *Collection) startKvOpTrace(operationName string, tracectx requestSpanContext) requestSpan {
	return c.tracer.StartSpan(operationName, tracectx).
		SetTag("couchbase.bucket", c.bucketName()).
		SetTag("couchbase.collection", c.collectionName).
		SetTag("couchbase.service", "kv")
}
// bucketName returns the name of the bucket this collection belongs to.
func (c *Collection) bucketName() string {
return c.bucket.Name()
}

View File

@@ -1,312 +0,0 @@
package gocb
import (
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// BinaryCollection is a set of binary operations.
type BinaryCollection struct {
collection *Collection
}
// AppendOptions are the options available to the Append operation.
type AppendOptions struct {
Timeout time.Duration
DurabilityLevel DurabilityLevel
PersistTo uint
ReplicateTo uint
// Cas, when non-zero, makes the operation conditional on the document's CAS.
Cas Cas
RetryStrategy RetryStrategy
}
// binaryAppend appends raw bytes to a document via the KV adjoin operation.
// The op manager handles tracing, durability validation, retries and
// synchronisation with the asynchronous gocbcore callback; the named returns
// are filled in from inside that callback.
func (c *Collection) binaryAppend(id string, val []byte, opts *AppendOptions) (mutOut *MutationResult, errOut error) {
if opts == nil {
opts = &AppendOptions{}
}
opm := c.newKvOpManager("Append", nil)
defer opm.Finish()
opm.SetDocumentID(id)
opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel)
opm.SetRetryStrategy(opts.RetryStrategy)
opm.SetTimeout(opts.Timeout)
if err := opm.CheckReadyForOp(); err != nil {
return nil, err
}
agent, err := c.getKvProvider()
if err != nil {
return nil, err
}
// Wait blocks until the callback below resolves or rejects the operation.
err = opm.Wait(agent.Append(gocbcore.AdjoinOptions{
Key: opm.DocumentID(),
Value: val,
CollectionName: opm.CollectionName(),
ScopeName: opm.ScopeName(),
DurabilityLevel: opm.DurabilityLevel(),
DurabilityLevelTimeout: opm.DurabilityTimeout(),
Cas: gocbcore.Cas(opts.Cas),
RetryStrategy: opm.RetryStrategy(),
TraceContext: opm.TraceSpan(),
Deadline: opm.Deadline(),
}, func(res *gocbcore.AdjoinResult, err error) {
if err != nil {
errOut = opm.EnhanceErr(err)
opm.Reject()
return
}
mutOut = &MutationResult{}
mutOut.cas = Cas(res.Cas)
mutOut.mt = opm.EnhanceMt(res.MutationToken)
opm.Resolve(mutOut.mt)
}))
if err != nil {
errOut = err
}
return
}
// Append appends a byte value to a document.
func (c *BinaryCollection) Append(id string, val []byte, opts *AppendOptions) (mutOut *MutationResult, errOut error) {
return c.collection.binaryAppend(id, val, opts)
}
// PrependOptions are the options available to the Prepend operation.
type PrependOptions struct {
Timeout time.Duration
DurabilityLevel DurabilityLevel
PersistTo uint
ReplicateTo uint
// Cas, when non-zero, makes the operation conditional on the document's CAS.
Cas Cas
RetryStrategy RetryStrategy
}
// binaryPrepend prepends raw bytes to a document via the KV adjoin operation.
// Mirrors binaryAppend: the op manager coordinates the async gocbcore
// callback, which fills in the named return values.
func (c *Collection) binaryPrepend(id string, val []byte, opts *PrependOptions) (mutOut *MutationResult, errOut error) {
if opts == nil {
opts = &PrependOptions{}
}
opm := c.newKvOpManager("Prepend", nil)
defer opm.Finish()
opm.SetDocumentID(id)
opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel)
opm.SetRetryStrategy(opts.RetryStrategy)
opm.SetTimeout(opts.Timeout)
if err := opm.CheckReadyForOp(); err != nil {
return nil, err
}
agent, err := c.getKvProvider()
if err != nil {
return nil, err
}
err = opm.Wait(agent.Prepend(gocbcore.AdjoinOptions{
Key: opm.DocumentID(),
Value: val,
CollectionName: opm.CollectionName(),
ScopeName: opm.ScopeName(),
DurabilityLevel: opm.DurabilityLevel(),
DurabilityLevelTimeout: opm.DurabilityTimeout(),
Cas: gocbcore.Cas(opts.Cas),
RetryStrategy: opm.RetryStrategy(),
TraceContext: opm.TraceSpan(),
Deadline: opm.Deadline(),
}, func(res *gocbcore.AdjoinResult, err error) {
if err != nil {
errOut = opm.EnhanceErr(err)
opm.Reject()
return
}
mutOut = &MutationResult{}
mutOut.cas = Cas(res.Cas)
mutOut.mt = opm.EnhanceMt(res.MutationToken)
opm.Resolve(mutOut.mt)
}))
if err != nil {
errOut = err
}
return
}
// Prepend prepends a byte value to a document.
func (c *BinaryCollection) Prepend(id string, val []byte, opts *PrependOptions) (mutOut *MutationResult, errOut error) {
return c.collection.binaryPrepend(id, val, opts)
}
// IncrementOptions are the options available to the Increment operation.
type IncrementOptions struct {
Timeout time.Duration
// Expiry is the length of time that the document will be stored in Couchbase.
// A value of 0 will set the document to never expire.
Expiry time.Duration
// Initial, if non-negative, is the `initial` value to use for the document if it does not exist.
// If present, this is the value that will be returned by a successful operation.
Initial int64
// Delta is the value to use for incrementing/decrementing if Initial is not present.
Delta uint64
DurabilityLevel DurabilityLevel
PersistTo uint
ReplicateTo uint
Cas Cas
RetryStrategy RetryStrategy
}
// binaryIncrement performs an atomic counter increment via the KV engine.
// The op manager coordinates the async gocbcore callback, which fills in the
// named return values.
func (c *Collection) binaryIncrement(id string, opts *IncrementOptions) (countOut *CounterResult, errOut error) {
if opts == nil {
opts = &IncrementOptions{}
}
opm := c.newKvOpManager("Increment", nil)
defer opm.Finish()
opm.SetDocumentID(id)
opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel)
opm.SetRetryStrategy(opts.RetryStrategy)
opm.SetTimeout(opts.Timeout)
// All-ones acts as the "no initial value" sentinel; a negative Initial
// means the caller did not request document creation.
realInitial := uint64(0xFFFFFFFFFFFFFFFF)
if opts.Initial >= 0 {
realInitial = uint64(opts.Initial)
}
if err := opm.CheckReadyForOp(); err != nil {
return nil, err
}
agent, err := c.getKvProvider()
if err != nil {
return nil, err
}
err = opm.Wait(agent.Increment(gocbcore.CounterOptions{
Key: opm.DocumentID(),
Delta: opts.Delta,
Initial: realInitial,
Expiry: durationToExpiry(opts.Expiry),
CollectionName: opm.CollectionName(),
ScopeName: opm.ScopeName(),
DurabilityLevel: opm.DurabilityLevel(),
DurabilityLevelTimeout: opm.DurabilityTimeout(),
Cas: gocbcore.Cas(opts.Cas),
RetryStrategy: opm.RetryStrategy(),
TraceContext: opm.TraceSpan(),
Deadline: opm.Deadline(),
}, func(res *gocbcore.CounterResult, err error) {
if err != nil {
errOut = opm.EnhanceErr(err)
opm.Reject()
return
}
countOut = &CounterResult{}
countOut.cas = Cas(res.Cas)
countOut.mt = opm.EnhanceMt(res.MutationToken)
countOut.content = res.Value
opm.Resolve(countOut.mt)
}))
if err != nil {
errOut = err
}
return
}
// Increment performs an atomic addition for an integer document. Passing a
// non-negative `initial` value will cause the document to be created if it did not
// already exist.
func (c *BinaryCollection) Increment(id string, opts *IncrementOptions) (countOut *CounterResult, errOut error) {
return c.collection.binaryIncrement(id, opts)
}
// DecrementOptions are the options available to the Decrement operation.
type DecrementOptions struct {
Timeout time.Duration
// Expiry is the length of time that the document will be stored in Couchbase.
// A value of 0 will set the document to never expire.
Expiry time.Duration
// Initial, if non-negative, is the `initial` value to use for the document if it does not exist.
// If present, this is the value that will be returned by a successful operation.
Initial int64
// Delta is the value to use for incrementing/decrementing if Initial is not present.
Delta uint64
DurabilityLevel DurabilityLevel
PersistTo uint
ReplicateTo uint
Cas Cas
RetryStrategy RetryStrategy
}
func (c *Collection) binaryDecrement(id string, opts *DecrementOptions) (countOut *CounterResult, errOut error) {
if opts == nil {
opts = &DecrementOptions{}
}
opm := c.newKvOpManager("Decrement", nil)
defer opm.Finish()
opm.SetDocumentID(id)
opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel)
opm.SetRetryStrategy(opts.RetryStrategy)
opm.SetTimeout(opts.Timeout)
realInitial := uint64(0xFFFFFFFFFFFFFFFF)
if opts.Initial >= 0 {
realInitial = uint64(opts.Initial)
}
if err := opm.CheckReadyForOp(); err != nil {
return nil, err
}
agent, err := c.getKvProvider()
if err != nil {
return nil, err
}
err = opm.Wait(agent.Decrement(gocbcore.CounterOptions{
Key: opm.DocumentID(),
Delta: opts.Delta,
Initial: realInitial,
Expiry: durationToExpiry(opts.Expiry),
CollectionName: opm.CollectionName(),
ScopeName: opm.ScopeName(),
DurabilityLevel: opm.DurabilityLevel(),
DurabilityLevelTimeout: opm.DurabilityTimeout(),
Cas: gocbcore.Cas(opts.Cas),
RetryStrategy: opm.RetryStrategy(),
TraceContext: opm.TraceSpan(),
Deadline: opm.Deadline(),
}, func(res *gocbcore.CounterResult, err error) {
if err != nil {
errOut = opm.EnhanceErr(err)
opm.Reject()
return
}
countOut = &CounterResult{}
countOut.cas = Cas(res.Cas)
countOut.mt = opm.EnhanceMt(res.MutationToken)
countOut.content = res.Value
opm.Resolve(countOut.mt)
}))
if err != nil {
errOut = err
}
return
}
// Decrement atomically subtracts from an integer (counter) document. When a
// non-negative `initial` value is supplied in the options, a missing document
// is created with that value instead of the operation failing.
func (c *BinaryCollection) Decrement(id string, opts *DecrementOptions) (countOut *CounterResult, errOut error) {
	countOut, errOut = c.collection.binaryDecrement(id, opts)
	return countOut, errOut
}

View File

@@ -1,745 +0,0 @@
package gocb
import (
"time"
"github.com/couchbase/gocbcore/v9"
)
// bulkOp carries the per-operation state shared by every BulkOp
// implementation: the in-flight gocbcore operation and its trace span.
type bulkOp struct {
	pendop gocbcore.PendingOp
	span   requestSpan
}

// cancel aborts the pending gocbcore operation, if still in flight.
func (op *bulkOp) cancel() {
	op.pendop.Cancel()
}

// finish closes the operation's trace span.
func (op *bulkOp) finish() {
	op.span.Finish()
}
// BulkOp represents a single operation that can be submitted (within a list of more operations) to .Do()
// You can create a bulk operation by instantiating one of the implementations of BulkOp,
// such as GetOp, UpsertOp, ReplaceOp, and more.
// UNCOMMITTED: This API may change in the future.
type BulkOp interface {
	// execute dispatches the operation; implementations must send the op on
	// signal exactly once when it completes (or fails to dispatch).
	execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
		retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan)
	// markError records an error on the op's public Err field.
	markError(err error)
	cancel()
	finish()
}
// BulkOpOptions are the set of options available when performing BulkOps using Do.
type BulkOpOptions struct {
	// Timeout bounds the whole batch; zero scales the collection's KV timeout
	// by the number of operations.
	Timeout time.Duration
	// Transcoder defaults to the collection's transcoder when nil.
	Transcoder    Transcoder
	RetryStrategy RetryStrategy
}
// Do executes one or more `BulkOp` items in parallel and waits until all of
// them have completed. Per-operation results and errors are recorded on the
// individual ops; Do itself only fails when the batch cannot be dispatched.
// UNCOMMITTED: This API may change in the future.
func (c *Collection) Do(ops []BulkOp, opts *BulkOpOptions) error {
	if opts == nil {
		opts = &BulkOpOptions{}
	}

	span := c.startKvOpTrace("Do", nil)
	// Fix: the batch-level span was started but never finished, leaking it in
	// tracing backends (child op spans are finished via bulkOp.finish below).
	defer span.Finish()

	timeout := opts.Timeout
	if opts.Timeout == 0 {
		// Scale the default per-op KV timeout by the batch size.
		timeout = c.timeoutsConfig.KVTimeout * time.Duration(len(ops))
	}

	retryWrapper := c.retryStrategyWrapper
	if opts.RetryStrategy != nil {
		retryWrapper = newRetryStrategyWrapper(opts.RetryStrategy)
	}

	if opts.Transcoder == nil {
		opts.Transcoder = c.transcoder
	}

	agent, err := c.getKvProvider()
	if err != nil {
		return err
	}

	// Make the channel big enough to hold all our ops in case
	// we get delayed inside execute (don't want to block the
	// individual op handlers when they dispatch their signal).
	signal := make(chan BulkOp, len(ops))
	for _, item := range ops {
		item.execute(span.Context(), c, agent, opts.Transcoder, signal, retryWrapper, time.Now().Add(timeout), c.startKvOpTrace)
	}
	for range ops {
		item := <-signal
		// We're really just clearing the pendop from this thread,
		// since it already completed, no cancel actually occurs
		item.finish()
	}
	return nil
}
// GetOp represents a type of `BulkOp` used for Get operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type GetOp struct {
	bulkOp

	// ID is the key of the document to fetch.
	ID string
	// Result holds the fetched document on success; nil when Err is set.
	Result *GetResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *GetOp) markError(err error) {
	item.Err = err
}

// execute dispatches the underlying gocbcore Get. Exactly one send on signal
// occurs per op: from the completion callback, or from the immediate
// dispatch-error path when Get itself returns an error.
func (item *GetOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("GetOp", tracectx)
	item.bulkOp.span = span

	op, err := provider.Get(gocbcore.GetOptions{
		Key:            []byte(item.ID),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.GetResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &GetResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
				transcoder: transcoder,
				contents:   res.Value,
				flags:      res.Flags,
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// GetAndTouchOp represents a type of `BulkOp` used for GetAndTouch operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type GetAndTouchOp struct {
	bulkOp

	// ID is the key of the document to fetch.
	ID string
	// Expiry is the new expiry applied to the document as part of the fetch.
	Expiry time.Duration
	Result *GetResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *GetAndTouchOp) markError(err error) {
	item.Err = err
}

// execute dispatches the underlying gocbcore GetAndTouch. Exactly one send on
// signal occurs per op (callback path or dispatch-error path).
func (item *GetAndTouchOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("GetAndTouchOp", tracectx)
	item.bulkOp.span = span

	op, err := provider.GetAndTouch(gocbcore.GetAndTouchOptions{
		Key:            []byte(item.ID),
		Expiry:         durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.GetAndTouchResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &GetResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
				transcoder: transcoder,
				contents:   res.Value,
				flags:      res.Flags,
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// TouchOp represents a type of `BulkOp` used for Touch operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type TouchOp struct {
	bulkOp

	// ID is the key of the document whose expiry is updated.
	ID string
	// Expiry is the new expiry to apply.
	Expiry time.Duration
	Result *MutationResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *TouchOp) markError(err error) {
	item.Err = err
}

// execute dispatches the underlying gocbcore Touch. Exactly one send on
// signal occurs per op (callback path or dispatch-error path).
func (item *TouchOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("TouchOp", tracectx)
	item.bulkOp.span = span

	op, err := provider.Touch(gocbcore.TouchOptions{
		Key:            []byte(item.ID),
		Expiry:         durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.TouchResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// RemoveOp represents a type of `BulkOp` used for Remove operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type RemoveOp struct {
	bulkOp

	// ID is the key of the document to remove.
	ID string
	// Cas, when non-zero, makes the removal conditional on the document's
	// current CAS value.
	Cas    Cas
	Result *MutationResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *RemoveOp) markError(err error) {
	item.Err = err
}

// execute dispatches the underlying gocbcore Delete. Exactly one send on
// signal occurs per op (callback path or dispatch-error path).
func (item *RemoveOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("RemoveOp", tracectx)
	item.bulkOp.span = span

	op, err := provider.Delete(gocbcore.DeleteOptions{
		Key:            []byte(item.ID),
		Cas:            gocbcore.Cas(item.Cas),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.DeleteResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// UpsertOp represents a type of `BulkOp` used for Upsert operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type UpsertOp struct {
	bulkOp

	// ID is the key of the document to upsert.
	ID string
	// Value is the document body; encoded with the batch transcoder.
	Value  interface{}
	Expiry time.Duration
	// NOTE(review): Cas is declared here but never passed to the underlying
	// Set options below, so it is silently ignored — confirm whether it
	// should be removed or wired through.
	Cas    Cas
	Result *MutationResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *UpsertOp) markError(err error) {
	item.Err = err
}

// execute encodes the value and dispatches the underlying gocbcore Set.
// Exactly one send on signal occurs per op (encode-error, dispatch-error,
// or completion-callback path).
func (item *UpsertOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder,
	signal chan BulkOp, retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("UpsertOp", tracectx)
	item.bulkOp.span = span

	// Trace the (synchronous) encode step separately from the network op.
	etrace := c.startKvOpTrace("encode", span.Context())
	bytes, flags, err := transcoder.Encode(item.Value)
	etrace.Finish()
	if err != nil {
		item.Err = err
		signal <- item
		return
	}

	op, err := provider.Set(gocbcore.SetOptions{
		Key:            []byte(item.ID),
		Value:          bytes,
		Flags:          flags,
		Expiry:         durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.StoreResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// InsertOp represents a type of `BulkOp` used for Insert operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type InsertOp struct {
	bulkOp

	// ID is the key of the document to insert; the op fails if it exists.
	ID string
	// Value is the document body; encoded with the batch transcoder.
	Value  interface{}
	Expiry time.Duration
	Result *MutationResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *InsertOp) markError(err error) {
	item.Err = err
}

// execute encodes the value and dispatches the underlying gocbcore Add.
// Exactly one send on signal occurs per op (encode-error, dispatch-error,
// or completion-callback path).
func (item *InsertOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("InsertOp", tracectx)
	item.bulkOp.span = span

	// Trace the (synchronous) encode step separately from the network op.
	etrace := c.startKvOpTrace("encode", span.Context())
	bytes, flags, err := transcoder.Encode(item.Value)
	if err != nil {
		etrace.Finish()
		item.Err = err
		signal <- item
		return
	}
	etrace.Finish()

	op, err := provider.Add(gocbcore.AddOptions{
		Key:            []byte(item.ID),
		Value:          bytes,
		Flags:          flags,
		Expiry:         durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.StoreResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// ReplaceOp represents a type of `BulkOp` used for Replace operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type ReplaceOp struct {
	bulkOp

	// ID is the key of the document to replace; the op fails if it is missing.
	ID string
	// Value is the document body; encoded with the batch transcoder.
	Value  interface{}
	Expiry time.Duration
	// Cas, when non-zero, makes the replace conditional on the document's
	// current CAS value.
	Cas    Cas
	Result *MutationResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *ReplaceOp) markError(err error) {
	item.Err = err
}

// execute encodes the value and dispatches the underlying gocbcore Replace.
// Exactly one send on signal occurs per op (encode-error, dispatch-error,
// or completion-callback path).
func (item *ReplaceOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("ReplaceOp", tracectx)
	item.bulkOp.span = span

	// Trace the (synchronous) encode step separately from the network op.
	etrace := c.startKvOpTrace("encode", span.Context())
	bytes, flags, err := transcoder.Encode(item.Value)
	if err != nil {
		etrace.Finish()
		item.Err = err
		signal <- item
		return
	}
	etrace.Finish()

	op, err := provider.Replace(gocbcore.ReplaceOptions{
		Key:            []byte(item.ID),
		Value:          bytes,
		Flags:          flags,
		Cas:            gocbcore.Cas(item.Cas),
		Expiry:         durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.StoreResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// AppendOp represents a type of `BulkOp` used for Append operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type AppendOp struct {
	bulkOp

	// ID is the key of the binary document to append to.
	ID string
	// Value is the raw string appended to the document (no transcoding).
	Value  string
	Result *MutationResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *AppendOp) markError(err error) {
	item.Err = err
}

// execute dispatches the underlying gocbcore Append. Exactly one send on
// signal occurs per op (callback path or dispatch-error path).
func (item *AppendOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("AppendOp", tracectx)
	item.bulkOp.span = span

	op, err := provider.Append(gocbcore.AdjoinOptions{
		Key:            []byte(item.ID),
		Value:          []byte(item.Value),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.AdjoinResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// PrependOp represents a type of `BulkOp` used for Prepend operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type PrependOp struct {
	bulkOp

	// ID is the key of the binary document to prepend to.
	ID string
	// Value is the raw string prepended to the document (no transcoding).
	Value  string
	Result *MutationResult
	Err    error
}

// markError records a bulk-level failure on the op.
func (item *PrependOp) markError(err error) {
	item.Err = err
}

// execute dispatches the underlying gocbcore Prepend. Exactly one send on
// signal occurs per op (callback path or dispatch-error path).
func (item *PrependOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("PrependOp", tracectx)
	item.bulkOp.span = span

	op, err := provider.Prepend(gocbcore.AdjoinOptions{
		Key:            []byte(item.ID),
		Value:          []byte(item.Value),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.AdjoinResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &MutationResult{
				Result: Result{
					cas: Cas(res.Cas),
				},
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// IncrementOp represents a type of `BulkOp` used for Increment operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type IncrementOp struct {
	bulkOp

	// ID is the key of the counter document.
	ID string
	// Delta is the amount to add.
	Delta int64
	// Initial, when positive, is the value used to create a missing document.
	Initial int64
	Expiry  time.Duration
	Result  *CounterResult
	Err     error
}

// markError records a bulk-level failure on the op.
func (item *IncrementOp) markError(err error) {
	item.Err = err
}

// execute dispatches the underlying gocbcore Increment. Exactly one send on
// signal occurs per op (callback path or dispatch-error path).
func (item *IncrementOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("IncrementOp", tracectx)
	item.bulkOp.span = span

	// All-ones is the protocol sentinel for "no initial value" (fail if the
	// document does not exist).
	// NOTE(review): this uses `Initial > 0`, but the single-op counter path
	// (binaryDecrement) uses `Initial >= 0`, so an explicit initial of 0 is
	// ignored in bulk — confirm whether the asymmetry is intentional.
	realInitial := uint64(0xFFFFFFFFFFFFFFFF)
	if item.Initial > 0 {
		realInitial = uint64(item.Initial)
	}

	op, err := provider.Increment(gocbcore.CounterOptions{
		Key:            []byte(item.ID),
		Delta:          uint64(item.Delta),
		Initial:        realInitial,
		Expiry:         durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.CounterResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &CounterResult{
				MutationResult: MutationResult{
					Result: Result{
						cas: Cas(res.Cas),
					},
				},
				content: res.Value,
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}
// DecrementOp represents a type of `BulkOp` used for Decrement operations. See BulkOp.
// UNCOMMITTED: This API may change in the future.
type DecrementOp struct {
	bulkOp

	// ID is the key of the counter document.
	ID string
	// Delta is the amount to subtract.
	Delta int64
	// Initial, when positive, is the value used to create a missing document.
	Initial int64
	Expiry  time.Duration
	Result  *CounterResult
	Err     error
}

// markError records a bulk-level failure on the op.
func (item *DecrementOp) markError(err error) {
	item.Err = err
}

// execute dispatches the underlying gocbcore Decrement. Exactly one send on
// signal occurs per op (callback path or dispatch-error path).
func (item *DecrementOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp,
	retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) {
	span := startSpanFunc("DecrementOp", tracectx)
	item.bulkOp.span = span

	// All-ones is the protocol sentinel for "no initial value" (fail if the
	// document does not exist).
	// NOTE(review): `Initial > 0` here vs `Initial >= 0` in the single-op
	// path — an explicit initial of 0 is ignored in bulk; confirm intent.
	realInitial := uint64(0xFFFFFFFFFFFFFFFF)
	if item.Initial > 0 {
		realInitial = uint64(item.Initial)
	}

	op, err := provider.Decrement(gocbcore.CounterOptions{
		Key:            []byte(item.ID),
		Delta:          uint64(item.Delta),
		Initial:        realInitial,
		Expiry:         durationToExpiry(item.Expiry),
		CollectionName: c.name(),
		ScopeName:      c.ScopeName(),
		RetryStrategy:  retryWrapper,
		TraceContext:   span.Context(),
		Deadline:       deadline,
	}, func(res *gocbcore.CounterResult, err error) {
		item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID)
		if item.Err == nil {
			item.Result = &CounterResult{
				MutationResult: MutationResult{
					Result: Result{
						cas: Cas(res.Cas),
					},
				},
				content: res.Value,
			}
			// A zero VbUUID indicates no mutation token was returned.
			if res.MutationToken.VbUUID != 0 {
				mutTok := &MutationToken{
					token:      res.MutationToken,
					bucketName: c.bucketName(),
				}
				item.Result.mt = mutTok
			}
		}
		signal <- item
	})
	if err != nil {
		item.Err = err
		signal <- item
	} else {
		item.bulkOp.pendop = op
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,476 +0,0 @@
package gocb
import (
"errors"
"fmt"
)
// CouchbaseList represents a list document: a single JSON array document
// manipulated through full-document and sub-document operations.
type CouchbaseList struct {
	collection *Collection
	id         string
}

// List returns a new CouchbaseList for the document specified by id.
// The document itself is created lazily on first Append/Prepend.
func (c *Collection) List(id string) *CouchbaseList {
	return &CouchbaseList{
		collection: c,
		id:         id,
	}
}

// Iterator returns an iterable for all items in the list.
// It fetches the full document and decodes it into a slice.
func (cl *CouchbaseList) Iterator() ([]interface{}, error) {
	content, err := cl.collection.Get(cl.id, nil)
	if err != nil {
		return nil, err
	}

	var listContents []interface{}
	err = content.Content(&listContents)
	if err != nil {
		return nil, err
	}

	return listContents, nil
}

// At retrieves the value specified at the given index from the list.
// Uses a sub-document lookup with an array-index path ("[N]").
func (cl *CouchbaseList) At(index int, valuePtr interface{}) error {
	ops := make([]LookupInSpec, 1)
	ops[0] = GetSpec(fmt.Sprintf("[%d]", index), nil)
	result, err := cl.collection.LookupIn(cl.id, ops, nil)
	if err != nil {
		return err
	}

	return result.ContentAt(0, valuePtr)
}

// RemoveAt removes the value specified at the given index from the list.
func (cl *CouchbaseList) RemoveAt(index int) error {
	ops := make([]MutateInSpec, 1)
	ops[0] = RemoveSpec(fmt.Sprintf("[%d]", index), nil)
	_, err := cl.collection.MutateIn(cl.id, ops, nil)
	if err != nil {
		return err
	}

	return nil
}

// Append appends an item to the list, creating the document if it is missing
// (StoreSemanticsUpsert).
func (cl *CouchbaseList) Append(val interface{}) error {
	ops := make([]MutateInSpec, 1)
	ops[0] = ArrayAppendSpec("", val, nil)
	_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
	if err != nil {
		return err
	}

	return nil
}

// Prepend prepends an item to the list, creating the document if it is
// missing (StoreSemanticsUpsert).
func (cl *CouchbaseList) Prepend(val interface{}) error {
	ops := make([]MutateInSpec, 1)
	ops[0] = ArrayPrependSpec("", val, nil)
	_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
	if err != nil {
		return err
	}

	return nil
}

// IndexOf gets the index of the item in the list.
// Returns -1 (with a nil error) when the item is not found. Comparison uses
// Go interface equality on the decoded JSON values.
func (cl *CouchbaseList) IndexOf(val interface{}) (int, error) {
	content, err := cl.collection.Get(cl.id, nil)
	if err != nil {
		return 0, err
	}

	var listContents []interface{}
	err = content.Content(&listContents)
	if err != nil {
		return 0, err
	}

	for i, item := range listContents {
		if item == val {
			return i, nil
		}
	}

	return -1, nil
}

// Size returns the size of the list, via a sub-document count lookup
// (does not transfer the whole document).
func (cl *CouchbaseList) Size() (int, error) {
	ops := make([]LookupInSpec, 1)
	ops[0] = CountSpec("", nil)
	result, err := cl.collection.LookupIn(cl.id, ops, nil)
	if err != nil {
		return 0, err
	}

	var count int
	err = result.ContentAt(0, &count)
	if err != nil {
		return 0, err
	}

	return count, nil
}

// Clear clears a list, also removing it (the backing document is deleted).
func (cl *CouchbaseList) Clear() error {
	_, err := cl.collection.Remove(cl.id, nil)
	if err != nil {
		return err
	}

	return nil
}
// CouchbaseMap represents a map document: a single JSON object document
// manipulated through full-document and sub-document operations.
type CouchbaseMap struct {
	collection *Collection
	id         string
}

// Map returns a new CouchbaseMap for the document specified by id.
// The document itself is created lazily on first Add.
func (c *Collection) Map(id string) *CouchbaseMap {
	return &CouchbaseMap{
		collection: c,
		id:         id,
	}
}

// Iterator returns an iterable for all items in the map.
func (cl *CouchbaseMap) Iterator() (map[string]interface{}, error) {
	content, err := cl.collection.Get(cl.id, nil)
	if err != nil {
		return nil, err
	}

	var mapContents map[string]interface{}
	err = content.Content(&mapContents)
	if err != nil {
		return nil, err
	}

	return mapContents, nil
}

// At retrieves the item for the given id from the map.
// NOTE(review): the path is built as "[%s]", the array-index bracket form;
// sub-document map keys are normally addressed by bare (backtick-quoted)
// path — confirm the server accepts this form for object keys.
func (cl *CouchbaseMap) At(id string, valuePtr interface{}) error {
	ops := make([]LookupInSpec, 1)
	ops[0] = GetSpec(fmt.Sprintf("[%s]", id), nil)
	result, err := cl.collection.LookupIn(cl.id, ops, nil)
	if err != nil {
		return err
	}

	return result.ContentAt(0, valuePtr)
}

// Add adds an item to the map, creating the document if it is missing
// (StoreSemanticsUpsert). An existing key is overwritten.
func (cl *CouchbaseMap) Add(id string, val interface{}) error {
	ops := make([]MutateInSpec, 1)
	ops[0] = UpsertSpec(id, val, nil)
	_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
	if err != nil {
		return err
	}

	return nil
}

// Remove removes an item from the map.
func (cl *CouchbaseMap) Remove(id string) error {
	ops := make([]MutateInSpec, 1)
	ops[0] = RemoveSpec(id, nil)
	_, err := cl.collection.MutateIn(cl.id, ops, nil)
	if err != nil {
		return err
	}

	return nil
}

// Exists verifies whether or a id exists in the map.
// NOTE(review): same "[%s]" bracketed-path concern as At above.
func (cl *CouchbaseMap) Exists(id string) (bool, error) {
	ops := make([]LookupInSpec, 1)
	ops[0] = ExistsSpec(fmt.Sprintf("[%s]", id), nil)
	result, err := cl.collection.LookupIn(cl.id, ops, nil)
	if err != nil {
		return false, err
	}

	return result.Exists(0), nil
}

// Size returns the size of the map, via a sub-document count lookup.
func (cl *CouchbaseMap) Size() (int, error) {
	ops := make([]LookupInSpec, 1)
	ops[0] = CountSpec("", nil)
	result, err := cl.collection.LookupIn(cl.id, ops, nil)
	if err != nil {
		return 0, err
	}

	var count int
	err = result.ContentAt(0, &count)
	if err != nil {
		return 0, err
	}

	return count, nil
}

// Keys returns all of the keys within the map. Order is unspecified
// (Go map iteration order).
func (cl *CouchbaseMap) Keys() ([]string, error) {
	content, err := cl.collection.Get(cl.id, nil)
	if err != nil {
		return nil, err
	}

	var mapContents map[string]interface{}
	err = content.Content(&mapContents)
	if err != nil {
		return nil, err
	}

	var keys []string
	for id := range mapContents {
		keys = append(keys, id)
	}

	return keys, nil
}

// Values returns all of the values within the map. Order is unspecified.
func (cl *CouchbaseMap) Values() ([]interface{}, error) {
	content, err := cl.collection.Get(cl.id, nil)
	if err != nil {
		return nil, err
	}

	var mapContents map[string]interface{}
	err = content.Content(&mapContents)
	if err != nil {
		return nil, err
	}

	var values []interface{}
	for _, val := range mapContents {
		values = append(values, val)
	}

	return values, nil
}

// Clear clears a map, also removing it (the backing document is deleted).
func (cl *CouchbaseMap) Clear() error {
	_, err := cl.collection.Remove(cl.id, nil)
	if err != nil {
		return err
	}

	return nil
}
// CouchbaseSet represents a set document: a JSON array with unique elements,
// layered on top of CouchbaseList.
type CouchbaseSet struct {
	id         string
	underlying *CouchbaseList
}

// Set returns a new CouchbaseSet for the document specified by id.
func (c *Collection) Set(id string) *CouchbaseSet {
	return &CouchbaseSet{
		id:         id,
		underlying: c.List(id),
	}
}

// Iterator returns an iterable for all items in the set.
func (cs *CouchbaseSet) Iterator() ([]interface{}, error) {
	return cs.underlying.Iterator()
}

// Add adds a value to the set. Uniqueness is enforced server-side via the
// sub-document ArrayAddUnique operation; the document is created if missing.
func (cs *CouchbaseSet) Add(val interface{}) error {
	ops := make([]MutateInSpec, 1)
	ops[0] = ArrayAddUniqueSpec("", val, nil)
	_, err := cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
	if err != nil {
		return err
	}

	return nil
}

// Remove removes an value from the set.
// It is implemented as an optimistic read-locate-remove guarded by CAS and
// retried up to 16 times on CAS mismatch.
// NOTE(review): the inner loop variable i shadows the retry counter, and a
// duplicated value would resolve to the LAST matching index — both are
// harmless under the set's uniqueness invariant but worth confirming.
func (cs *CouchbaseSet) Remove(val string) error {
	for i := 0; i < 16; i++ {
		content, err := cs.underlying.collection.Get(cs.id, nil)
		if err != nil {
			return err
		}

		cas := content.Cas()

		var setContents []interface{}
		err = content.Content(&setContents)
		if err != nil {
			return err
		}

		indexToRemove := -1
		for i, item := range setContents {
			if item == val {
				indexToRemove = i
			}
		}

		if indexToRemove > -1 {
			ops := make([]MutateInSpec, 1)
			ops[0] = RemoveSpec(fmt.Sprintf("[%d]", indexToRemove), nil)
			// CAS guard: if another client mutated the set since our read,
			// retry the whole read-modify cycle.
			_, err = cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{Cas: cas})
			if errors.Is(err, ErrCasMismatch) {
				continue
			}
			if err != nil {
				return err
			}
		}
		return nil
	}

	return errors.New("failed to perform operation after 16 retries")
}

// Values returns all of the values within the set.
func (cs *CouchbaseSet) Values() ([]interface{}, error) {
	content, err := cs.underlying.collection.Get(cs.id, nil)
	if err != nil {
		return nil, err
	}

	var setContents []interface{}
	err = content.Content(&setContents)
	if err != nil {
		return nil, err
	}

	return setContents, nil
}

// Contains verifies whether or not a value exists within the set.
// Comparison uses Go interface equality on the decoded JSON values.
func (cs *CouchbaseSet) Contains(val string) (bool, error) {
	content, err := cs.underlying.collection.Get(cs.id, nil)
	if err != nil {
		return false, err
	}

	var setContents []interface{}
	err = content.Content(&setContents)
	if err != nil {
		return false, err
	}

	for _, item := range setContents {
		if item == val {
			return true, nil
		}
	}

	return false, nil
}

// Size returns the size of the set
func (cs *CouchbaseSet) Size() (int, error) {
	return cs.underlying.Size()
}

// Clear clears a set, also removing it (the backing document is deleted).
func (cs *CouchbaseSet) Clear() error {
	err := cs.underlying.Clear()
	if err != nil {
		return err
	}

	return nil
}
// CouchbaseQueue represents a queue document: FIFO semantics layered on top
// of CouchbaseList (push to the front, pop from the back).
type CouchbaseQueue struct {
	id         string
	underlying *CouchbaseList
}

// Queue returns a new CouchbaseQueue for the document specified by id.
func (c *Collection) Queue(id string) *CouchbaseQueue {
	return &CouchbaseQueue{
		id:         id,
		underlying: c.List(id),
	}
}

// Iterator returns an iterable for all items in the queue.
func (cs *CouchbaseQueue) Iterator() ([]interface{}, error) {
	return cs.underlying.Iterator()
}

// Push pushes a value onto the queue (prepends, so the oldest item stays at
// the tail for Pop).
func (cs *CouchbaseQueue) Push(val interface{}) error {
	return cs.underlying.Prepend(val)
}

// Pop pops an items off of the queue into valuePtr.
// Implemented as an optimistic read of the last element ("[-1]") followed by
// a CAS-guarded removal, retried up to 16 times on CAS mismatch.
func (cs *CouchbaseQueue) Pop(valuePtr interface{}) error {
	for i := 0; i < 16; i++ {
		ops := make([]LookupInSpec, 1)
		ops[0] = GetSpec("[-1]", nil)
		content, err := cs.underlying.collection.LookupIn(cs.id, ops, nil)
		if err != nil {
			return err
		}

		cas := content.Cas()
		err = content.ContentAt(0, valuePtr)
		if err != nil {
			return err
		}

		mutateOps := make([]MutateInSpec, 1)
		mutateOps[0] = RemoveSpec("[-1]", nil)
		// CAS guard: if another client mutated the queue since our read,
		// retry the whole read-remove cycle.
		_, err = cs.underlying.collection.MutateIn(cs.id, mutateOps, &MutateInOptions{Cas: cas})
		if errors.Is(err, ErrCasMismatch) {
			continue
		}
		if err != nil {
			return err
		}
		return nil
	}

	return errors.New("failed to perform operation after 16 retries")
}

// Size returns the size of the queue.
func (cs *CouchbaseQueue) Size() (int, error) {
	return cs.underlying.Size()
}

// Clear clears a queue, also removing it (the backing document is deleted).
func (cs *CouchbaseQueue) Clear() error {
	err := cs.underlying.Clear()
	if err != nil {
		return err
	}

	return nil
}

View File

@@ -1,174 +0,0 @@
package gocb
import (
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// observeOnceSeqNo issues a single ObserveVb poll against the given replica
// and reports whether the mutation identified by mt has been replicated
// and/or persisted there (by comparing the observed sequence numbers against
// the mutation token's SeqNo).
func (c *Collection) observeOnceSeqNo(
	tracectx requestSpanContext,
	docID string,
	mt gocbcore.MutationToken,
	replicaIdx int,
	cancelCh chan struct{},
	timeout time.Duration,
) (didReplicate, didPersist bool, errOut error) {
	opm := c.newKvOpManager("observeOnceSeqNo", tracectx)
	defer opm.Finish()

	opm.SetDocumentID(docID)
	opm.SetCancelCh(cancelCh)
	opm.SetTimeout(timeout)

	agent, err := c.getKvProvider()
	if err != nil {
		return false, false, err
	}
	err = opm.Wait(agent.ObserveVb(gocbcore.ObserveVbOptions{
		VbID:         mt.VbID,
		VbUUID:       mt.VbUUID,
		ReplicaIdx:   replicaIdx,
		TraceContext: opm.TraceSpan(),
		Deadline:     opm.Deadline(),
	}, func(res *gocbcore.ObserveVbResult, err error) {
		if err != nil || res == nil {
			errOut = opm.EnhanceErr(err)
			opm.Reject()
			return
		}

		// The mutation has reached this node once its current/persist
		// sequence numbers have caught up with the token's SeqNo.
		didReplicate = res.CurrentSeqNo >= mt.SeqNo
		didPersist = res.PersistSeqNo >= mt.SeqNo

		opm.Resolve(nil)
	}))
	if err != nil {
		errOut = err
	}
	return
}
// observeOne polls a single replica (via observeOnceSeqNo) with exponential
// backoff until the mutation is both replicated and persisted there, until
// cancelCh is closed, or until a poll fails. It signals replicaCh/persistCh
// at most once each as the respective milestone is first observed.
func (c *Collection) observeOne(
	tracectx requestSpanContext,
	docID string,
	mt gocbcore.MutationToken,
	replicaIdx int,
	replicaCh, persistCh chan struct{},
	cancelCh chan struct{},
	timeout time.Duration,
) {
	// Track which milestones have already been signalled so each channel
	// receives at most one send from this goroutine.
	sentReplicated := false
	sentPersisted := false

	calc := gocbcore.ExponentialBackoff(10*time.Microsecond, 100*time.Millisecond, 0)
	retries := uint32(0)

ObserveLoop:
	for {
		select {
		case <-cancelCh:
			break ObserveLoop
		default:
			// not cancelled yet
		}

		didReplicate, didPersist, err := c.observeOnceSeqNo(tracectx, docID, mt, replicaIdx, cancelCh, timeout)
		if err != nil {
			// A failed poll silently ends observation for this replica; the
			// parent times out if the durability targets are never met.
			logDebugf("ObserveOnce failed unexpected: %s", err)
			return
		}

		if didReplicate && !sentReplicated {
			replicaCh <- struct{}{}
			sentReplicated = true
		}

		if didPersist && !sentPersisted {
			persistCh <- struct{}{}
			sentPersisted = true
		}

		// If we've got persisted and replicated, we can just stop
		if sentPersisted && sentReplicated {
			break ObserveLoop
		}

		// Back off before the next poll, waking early on cancellation.
		waitTmr := gocbcore.AcquireTimer(calc(retries))
		retries++
		select {
		case <-waitTmr.C:
			gocbcore.ReleaseTimer(waitTmr, true)
		case <-cancelCh:
			gocbcore.ReleaseTimer(waitTmr, false)
		}
	}
}
// waitForDurability blocks until the mutation identified by mt has been
// observed as replicated on at least replicateTo nodes and persisted on at
// least persistTo nodes, or until the deadline passes / cancelCh fires.
// It fans out one observeOne goroutine per server (active + replicas) and
// tallies their milestone signals.
func (c *Collection) waitForDurability(
	tracectx requestSpanContext,
	docID string,
	mt gocbcore.MutationToken,
	replicateTo uint,
	persistTo uint,
	deadline time.Time,
	cancelCh chan struct{},
) error {
	opm := c.newKvOpManager("waitForDurability", tracectx)
	defer opm.Finish()

	opm.SetDocumentID(docID)

	agent, err := c.getKvProvider()
	if err != nil {
		return err
	}

	snapshot, err := agent.ConfigSnapshot()
	if err != nil {
		return err
	}

	numReplicas, err := snapshot.NumReplicas()
	if err != nil {
		return err
	}

	// The active node counts toward persistTo but not replicateTo, hence
	// the asymmetric bounds check below.
	numServers := numReplicas + 1
	if replicateTo > uint(numServers-1) || persistTo > uint(numServers) {
		return opm.EnhanceErr(ErrDurabilityImpossible)
	}

	// Buffered channels sized so observer goroutines never block on send
	// even after this function has returned.
	subOpCancelCh := make(chan struct{}, 1)
	replicaCh := make(chan struct{}, numServers)
	persistCh := make(chan struct{}, numServers)

	for replicaIdx := 0; replicaIdx < numServers; replicaIdx++ {
		go c.observeOne(opm.TraceSpan(), docID, mt, replicaIdx, replicaCh, persistCh, subOpCancelCh, time.Until(deadline))
	}

	numReplicated := uint(0)
	numPersisted := uint(0)
	for {
		select {
		case <-replicaCh:
			numReplicated++
		case <-persistCh:
			numPersisted++
		case <-time.After(time.Until(deadline)):
			// deadline exceeded
			close(subOpCancelCh)
			return opm.EnhanceErr(ErrAmbiguousTimeout)
		case <-cancelCh:
			// parent asked for cancellation
			close(subOpCancelCh)
			return opm.EnhanceErr(ErrRequestCanceled)
		}

		if numReplicated >= replicateTo && numPersisted >= persistTo {
			// Targets met; stop the remaining observers and succeed.
			close(subOpCancelCh)
			return nil
		}
	}
}

View File

@@ -1,316 +0,0 @@
package gocb
import (
"encoding/json"
"errors"
"time"
"github.com/couchbase/gocbcore/v9/memd"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// LookupInOptions are the set of options available to LookupIn.
type LookupInOptions struct {
	// Timeout is the per-operation timeout; zero falls back to the
	// collection default.
	Timeout       time.Duration
	RetryStrategy RetryStrategy

	// Internal: This should never be used and is not supported.
	Internal struct {
		// AccessDeleted allows sub-document reads against tombstones.
		AccessDeleted bool
	}
}
// LookupIn performs a set of subdocument lookup operations on the document identified by id.
// A nil opts is treated as all-defaults; the actual work is delegated to
// internalLookupIn once the op manager has been prepared.
func (c *Collection) LookupIn(id string, ops []LookupInSpec, opts *LookupInOptions) (docOut *LookupInResult, errOut error) {
	if opts == nil {
		opts = &LookupInOptions{}
	}

	opm := c.newKvOpManager("LookupIn", nil)
	defer opm.Finish()

	opm.SetDocumentID(id)
	opm.SetRetryStrategy(opts.RetryStrategy)
	opm.SetTimeout(opts.Timeout)

	if err := opm.CheckReadyForOp(); err != nil {
		return nil, err
	}

	return c.internalLookupIn(opm, ops, opts.Internal.AccessDeleted)
}
// internalLookupIn translates the public LookupInSpec list into gocbcore
// sub-document ops, dispatches the lookup, and assembles the per-op results.
// accessDeleted, when true, asks the server to operate on soft-deleted
// documents as well.
func (c *Collection) internalLookupIn(
	opm *kvOpManager,
	ops []LookupInSpec,
	accessDeleted bool,
) (docOut *LookupInResult, errOut error) {
	var subdocs []gocbcore.SubDocOp
	for _, op := range ops {
		if op.op == memd.SubDocOpGet && op.path == "" {
			// A get with an empty path means "fetch the whole document";
			// xattrs always require a path.
			if op.isXattr {
				return nil, errors.New("invalid xattr fetch with no path")
			}

			subdocs = append(subdocs, gocbcore.SubDocOp{
				Op:    memd.SubDocOpGetDoc,
				Flags: memd.SubdocFlag(SubdocFlagNone),
			})
			continue
		} else if op.op == memd.SubDocOpDictSet && op.path == "" {
			// Likewise, a dict-set with an empty path means "replace the
			// whole document".
			if op.isXattr {
				return nil, errors.New("invalid xattr set with no path")
			}

			subdocs = append(subdocs, gocbcore.SubDocOp{
				Op:    memd.SubDocOpSetDoc,
				Flags: memd.SubdocFlag(SubdocFlagNone),
			})
			continue
		}

		flags := memd.SubdocFlagNone
		if op.isXattr {
			flags |= memd.SubdocFlagXattrPath
		}

		subdocs = append(subdocs, gocbcore.SubDocOp{
			Op:    op.op,
			Path:  op.path,
			Flags: flags,
		})
	}

	var flags memd.SubdocDocFlag
	if accessDeleted {
		flags = memd.SubdocDocFlagAccessDeleted
	}

	agent, err := c.getKvProvider()
	if err != nil {
		return nil, err
	}

	err = opm.Wait(agent.LookupIn(gocbcore.LookupInOptions{
		Key:            opm.DocumentID(),
		Ops:            subdocs,
		CollectionName: opm.CollectionName(),
		ScopeName:      opm.ScopeName(),
		RetryStrategy:  opm.RetryStrategy(),
		TraceContext:   opm.TraceSpan(),
		Deadline:       opm.Deadline(),
		Flags:          flags,
	}, func(res *gocbcore.LookupInResult, err error) {
		// Only surface the error directly when there is no result at all;
		// per-op errors are reported through the individual result entries.
		if err != nil && res == nil {
			errOut = opm.EnhanceErr(err)
		}

		if res != nil {
			docOut = &LookupInResult{}
			docOut.cas = Cas(res.Cas)
			// Sized by the ops we sent; filled from the ops the server
			// answered.
			docOut.contents = make([]lookupInPartial, len(subdocs))
			for i, opRes := range res.Ops {
				docOut.contents[i].err = opm.EnhanceErr(opRes.Err)
				docOut.contents[i].data = json.RawMessage(opRes.Value)
			}
		}

		if err == nil {
			opm.Resolve(nil)
		} else {
			opm.Reject()
		}
	}))
	if err != nil {
		errOut = err
	}
	return
}
// StoreSemantics is used to define the document level action to take during a MutateIn operation.
type StoreSemantics uint8

const (
	// StoreSemanticsReplace signifies to Replace the document, and fail if it does not exist.
	// This is the default action.
	StoreSemanticsReplace StoreSemantics = iota

	// StoreSemanticsUpsert signifies to replace the document or create it if it doesn't exist.
	StoreSemanticsUpsert

	// StoreSemanticsInsert signifies to create the document, and fail if it exists.
	StoreSemanticsInsert
)
// MutateInOptions are the set of options available to MutateIn.
type MutateInOptions struct {
	// Expiry is forwarded to the server as the new document expiry
	// (converted via durationToExpiry).
	Expiry time.Duration
	// Cas, when non-zero, makes the mutation conditional on the document's
	// current CAS value.
	Cas Cas
	// PersistTo and ReplicateTo request observe-based durability.
	PersistTo   uint
	ReplicateTo uint
	// DurabilityLevel requests server-side synchronous durability.
	DurabilityLevel DurabilityLevel
	// StoreSemantic selects replace/upsert/insert document semantics
	// (see StoreSemantics).
	StoreSemantic StoreSemantics
	// Timeout bounds how long the mutation may take.
	Timeout time.Duration
	// RetryStrategy controls retry behaviour for the mutation.
	RetryStrategy RetryStrategy

	// Internal: This should never be used and is not supported.
	Internal struct {
		// AccessDeleted requests the mutation operate on soft-deleted
		// documents (maps to memd.SubdocDocFlagAccessDeleted).
		AccessDeleted bool
	}
}
// MutateIn performs a set of subdocument mutations on the document specified
// by id. A nil opts is treated as the zero options value.
//
// NOTE(review): opts.DurabilityLevel, PersistTo and ReplicateTo are accepted
// but never forwarded to the op manager here — confirm against upstream gocb.
func (c *Collection) MutateIn(id string, ops []MutateInSpec, opts *MutateInOptions) (mutOut *MutateInResult, errOut error) {
	if opts == nil {
		opts = &MutateInOptions{}
	}

	// Configure a per-operation manager that tracks tracing, timeout and
	// retry state for the lifetime of this call.
	mgr := c.newKvOpManager("MutateIn", nil)
	defer mgr.Finish()

	mgr.SetDocumentID(id)
	mgr.SetRetryStrategy(opts.RetryStrategy)
	mgr.SetTimeout(opts.Timeout)

	err := mgr.CheckReadyForOp()
	if err != nil {
		return nil, err
	}

	return c.internalMutateIn(mgr, opts.StoreSemantic, opts.Expiry, opts.Cas, ops, opts.Internal.AccessDeleted)
}
// jsonMarshalMultiArray marshals in, which must encode to a JSON array, and
// returns the comma-separated element bytes with the surrounding brackets
// stripped.
func jsonMarshalMultiArray(in interface{}) ([]byte, error) {
	encoded, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}

	// The value must have encoded to a JSON array; "[]" is the shortest
	// possible form, so anything under two bytes cannot qualify.
	if len(encoded) < 2 || encoded[0] != '[' {
		return nil, makeInvalidArgumentsError("not a JSON array")
	}

	// Drop the leading '[' and trailing ']'.
	return encoded[1 : len(encoded)-1], nil
}
// jsonMarshalMutateSpec encodes a single mutate spec's value and reports any
// subdoc flags implied by that value.
func jsonMarshalMutateSpec(op MutateInSpec) ([]byte, memd.SubdocFlag, error) {
	// No value at all encodes to nothing with no special flags.
	if op.value == nil {
		return nil, memd.SubdocFlagNone, nil
	}

	// Mutation macros are sent verbatim and expanded server-side, which
	// requires the macro-expansion and xattr-path flags.
	if macro, ok := op.value.(MutationMacro); ok {
		return []byte(macro), memd.SubdocFlagExpandMacros | memd.SubdocFlagXattrPath, nil
	}

	// Multi-value specs encode to the bracket-stripped elements of a JSON
	// array; everything else is plain JSON.
	if op.multiValue {
		encoded, err := jsonMarshalMultiArray(op.value)
		return encoded, memd.SubdocFlagNone, err
	}

	encoded, err := json.Marshal(op.value)
	return encoded, memd.SubdocFlagNone, err
}
// internalMutateIn translates the public MutateInSpec list into gocbcore
// sub-document ops, dispatches the mutation, and assembles the per-op
// results. action selects replace/upsert/insert document semantics, expiry
// sets the new document expiry, cas (when non-zero) makes the mutation
// conditional, and accessDeleted asks the server to operate on soft-deleted
// documents.
func (c *Collection) internalMutateIn(
	opm *kvOpManager,
	action StoreSemantics,
	expiry time.Duration,
	cas Cas,
	ops []MutateInSpec,
	accessDeleted bool,
) (mutOut *MutateInResult, errOut error) {
	// Map the store semantics onto the protocol-level document flags.
	var docFlags memd.SubdocDocFlag
	if action == StoreSemanticsReplace {
		// this is the default behaviour
	} else if action == StoreSemanticsUpsert {
		docFlags |= memd.SubdocDocFlagMkDoc
	} else if action == StoreSemanticsInsert {
		docFlags |= memd.SubdocDocFlagAddDoc
	} else {
		return nil, makeInvalidArgumentsError("invalid StoreSemantics value provided")
	}

	if accessDeleted {
		docFlags |= memd.SubdocDocFlagAccessDeleted
	}

	var subdocs []gocbcore.SubDocOp
	for _, op := range ops {
		if op.path == "" {
			// A blank path is rejected for insert/upsert/delete specs and
			// turns a replace into a whole-document set.
			switch op.op {
			case memd.SubDocOpDictAdd:
				return nil, makeInvalidArgumentsError("cannot specify a blank path with InsertSpec")
			case memd.SubDocOpDictSet:
				return nil, makeInvalidArgumentsError("cannot specify a blank path with UpsertSpec")
			case memd.SubDocOpDelete:
				return nil, makeInvalidArgumentsError("cannot specify a blank path with DeleteSpec")
			case memd.SubDocOpReplace:
				op.op = memd.SubDocOpSetDoc
			default:
			}
		}

		// Encode the spec value, timing the encode step in its own span.
		etrace := c.startKvOpTrace("encode", opm.TraceSpan())
		bytes, flags, err := jsonMarshalMutateSpec(op)
		etrace.Finish()
		if err != nil {
			return nil, err
		}

		if op.createPath {
			flags |= memd.SubdocFlagMkDirP
		}

		if op.isXattr {
			flags |= memd.SubdocFlagXattrPath
		}

		subdocs = append(subdocs, gocbcore.SubDocOp{
			Op:    op.op,
			Flags: flags,
			Path:  op.path,
			Value: bytes,
		})
	}

	agent, err := c.getKvProvider()
	if err != nil {
		return nil, err
	}

	err = opm.Wait(agent.MutateIn(gocbcore.MutateInOptions{
		Key:                    opm.DocumentID(),
		Flags:                  docFlags,
		Cas:                    gocbcore.Cas(cas),
		Ops:                    subdocs,
		Expiry:                 durationToExpiry(expiry),
		CollectionName:         opm.CollectionName(),
		ScopeName:              opm.ScopeName(),
		DurabilityLevel:        opm.DurabilityLevel(),
		DurabilityLevelTimeout: opm.DurabilityTimeout(),
		RetryStrategy:          opm.RetryStrategy(),
		TraceContext:           opm.TraceSpan(),
		Deadline:               opm.Deadline(),
	}, func(res *gocbcore.MutateInResult, err error) {
		// Any dispatch error fails the whole operation.
		if err != nil {
			errOut = opm.EnhanceErr(err)
			opm.Reject()
			return
		}

		mutOut = &MutateInResult{}
		mutOut.cas = Cas(res.Cas)
		mutOut.mt = opm.EnhanceMt(res.MutationToken)
		mutOut.contents = make([]mutateInPartial, len(res.Ops))
		for i, op := range res.Ops {
			mutOut.contents[i] = mutateInPartial{data: op.Value}
		}

		opm.Resolve(mutOut.mt)
	}))
	if err != nil {
		errOut = err
	}
	return
}

View File

@@ -1,203 +0,0 @@
package gocb
import (
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/couchbase/gocbcore/v9/memd"
)
const (
goCbVersionStr = "v2.1.4"
)
// QueryIndexType provides information on the type of indexer used for an index.
type QueryIndexType string
const (
// QueryIndexTypeGsi indicates that GSI was used to build the index.
QueryIndexTypeGsi QueryIndexType = "gsi"
// QueryIndexTypeView indicates that views were used to build the index.
QueryIndexTypeView QueryIndexType = "views"
)
// QueryStatus provides information about the current status of a query.
type QueryStatus string
const (
// QueryStatusRunning indicates the query is still running
QueryStatusRunning QueryStatus = "running"
// QueryStatusSuccess indicates the query was successful.
QueryStatusSuccess QueryStatus = "success"
// QueryStatusErrors indicates a query completed with errors.
QueryStatusErrors QueryStatus = "errors"
// QueryStatusCompleted indicates a query has completed.
QueryStatusCompleted QueryStatus = "completed"
// QueryStatusStopped indicates a query has been stopped.
QueryStatusStopped QueryStatus = "stopped"
// QueryStatusTimeout indicates a query timed out.
QueryStatusTimeout QueryStatus = "timeout"
// QueryStatusClosed indicates that a query was closed.
QueryStatusClosed QueryStatus = "closed"
// QueryStatusFatal indicates that a query ended with a fatal error.
QueryStatusFatal QueryStatus = "fatal"
// QueryStatusAborted indicates that a query was aborted.
QueryStatusAborted QueryStatus = "aborted"
// QueryStatusUnknown indicates that the query status is unknown.
QueryStatusUnknown QueryStatus = "unknown"
)
// ServiceType specifies a particular Couchbase service type.
type ServiceType gocbcore.ServiceType
const (
// ServiceTypeManagement represents a management service.
ServiceTypeManagement ServiceType = ServiceType(gocbcore.MgmtService)
// ServiceTypeKeyValue represents a memcached service.
ServiceTypeKeyValue ServiceType = ServiceType(gocbcore.MemdService)
// ServiceTypeViews represents a views service.
ServiceTypeViews ServiceType = ServiceType(gocbcore.CapiService)
// ServiceTypeQuery represents a query service.
ServiceTypeQuery ServiceType = ServiceType(gocbcore.N1qlService)
// ServiceTypeSearch represents a full-text-search service.
ServiceTypeSearch ServiceType = ServiceType(gocbcore.FtsService)
// ServiceTypeAnalytics represents an analytics service.
ServiceTypeAnalytics ServiceType = ServiceType(gocbcore.CbasService)
)
// QueryProfileMode specifies the profiling mode to use during a query.
type QueryProfileMode string
const (
// QueryProfileModeNone disables query profiling
QueryProfileModeNone QueryProfileMode = "off"
// QueryProfileModePhases includes phase profiling information in the query response
QueryProfileModePhases QueryProfileMode = "phases"
// QueryProfileModeTimings includes timing profiling information in the query response
QueryProfileModeTimings QueryProfileMode = "timings"
)
// SubdocFlag provides special handling flags for sub-document operations
type SubdocFlag memd.SubdocFlag
const (
// SubdocFlagNone indicates no special behaviours
SubdocFlagNone SubdocFlag = SubdocFlag(memd.SubdocFlagNone)
// SubdocFlagCreatePath indicates you wish to recursively create the tree of paths
// if it does not already exist within the document.
SubdocFlagCreatePath SubdocFlag = SubdocFlag(memd.SubdocFlagMkDirP)
// SubdocFlagXattr indicates your path refers to an extended attribute rather than the document.
SubdocFlagXattr SubdocFlag = SubdocFlag(memd.SubdocFlagXattrPath)
// SubdocFlagUseMacros indicates that you wish macro substitution to occur on the value
SubdocFlagUseMacros SubdocFlag = SubdocFlag(memd.SubdocFlagExpandMacros)
)
// SubdocDocFlag specifies document-level flags for a sub-document operation.
type SubdocDocFlag memd.SubdocDocFlag

const (
	// SubdocDocFlagNone indicates no special behaviours.
	SubdocDocFlagNone SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagNone)

	// SubdocDocFlagMkDoc indicates that the document should be created if it does not already exist.
	SubdocDocFlagMkDoc SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagMkDoc)

	// SubdocDocFlagAddDoc indicates that the document should be created only if it does not already exist.
	SubdocDocFlagAddDoc SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagAddDoc)

	// SubdocDocFlagAccessDeleted indicates that you wish to receive soft-deleted documents.
	SubdocDocFlagAccessDeleted SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagAccessDeleted)
)
// DurabilityLevel specifies the level of synchronous replication to use.
type DurabilityLevel uint8
const (
// DurabilityLevelMajority specifies that a mutation must be replicated (held in memory) to a majority of nodes.
DurabilityLevelMajority DurabilityLevel = iota + 1
// DurabilityLevelMajorityAndPersistOnMaster specifies that a mutation must be replicated (held in memory) to a
// majority of nodes and also persisted (written to disk) on the active node.
DurabilityLevelMajorityAndPersistOnMaster
// DurabilityLevelPersistToMajority specifies that a mutation must be persisted (written to disk) to a majority
// of nodes.
DurabilityLevelPersistToMajority
)
// MutationMacro can be supplied to MutateIn operations to perform ExpandMacros operations.
type MutationMacro string
const (
// MutationMacroCAS can be used to tell the server to use the CAS macro.
MutationMacroCAS MutationMacro = "\"${Mutation.CAS}\""
// MutationMacroSeqNo can be used to tell the server to use the seqno macro.
MutationMacroSeqNo MutationMacro = "\"${Mutation.seqno}\""
// MutationMacroValueCRC32c can be used to tell the server to use the value_crc32c macro.
MutationMacroValueCRC32c MutationMacro = "\"${Mutation.value_crc32c}\""
)
// ClusterState specifies the current state of the cluster
type ClusterState uint
const (
// ClusterStateOnline indicates that all nodes are online and reachable.
ClusterStateOnline ClusterState = iota + 1
// ClusterStateDegraded indicates that all services will function, but possibly not optimally.
ClusterStateDegraded
// ClusterStateOffline indicates that no nodes were reachable.
ClusterStateOffline
)
// EndpointState specifies the current state of an endpoint.
type EndpointState uint
const (
// EndpointStateDisconnected indicates the endpoint socket is unreachable.
EndpointStateDisconnected EndpointState = iota + 1
// EndpointStateConnecting indicates the endpoint socket is connecting.
EndpointStateConnecting
// EndpointStateConnected indicates the endpoint socket is connected and ready.
EndpointStateConnected
// EndpointStateDisconnecting indicates the endpoint socket is disconnecting.
EndpointStateDisconnecting
)
// PingState specifies the result of the ping operation
type PingState uint
const (
// PingStateOk indicates that the ping operation was successful.
PingStateOk PingState = iota + 1
// PingStateTimeout indicates that the ping operation timed out.
PingStateTimeout
// PingStateError indicates that the ping operation failed.
PingStateError
)

View File

@@ -1,57 +0,0 @@
package gocb
// serviceTypeToString maps a ServiceType to its short textual name, returning
// "" for unrecognized values.
func serviceTypeToString(service ServiceType) string {
	var name string
	switch service {
	case ServiceTypeManagement:
		name = "mgmt"
	case ServiceTypeKeyValue:
		name = "kv"
	case ServiceTypeViews:
		name = "views"
	case ServiceTypeQuery:
		name = "query"
	case ServiceTypeSearch:
		name = "search"
	case ServiceTypeAnalytics:
		name = "analytics"
	}
	return name
}
// clusterStateToString maps a ClusterState to its textual name, returning ""
// for unrecognized values.
func clusterStateToString(state ClusterState) string {
	var name string
	switch state {
	case ClusterStateOnline:
		name = "online"
	case ClusterStateDegraded:
		name = "degraded"
	case ClusterStateOffline:
		name = "offline"
	}
	return name
}
// endpointStateToString maps an EndpointState to its textual name, returning
// "" for unrecognized values.
func endpointStateToString(state EndpointState) string {
	var name string
	switch state {
	case EndpointStateDisconnected:
		name = "disconnected"
	case EndpointStateConnecting:
		name = "connecting"
	case EndpointStateConnected:
		name = "connected"
	case EndpointStateDisconnecting:
		name = "disconnecting"
	}
	return name
}
// pingStateToString maps a PingState to its textual name, returning "" for
// unrecognized values.
func pingStateToString(state PingState) string {
	var name string
	switch state {
	case PingStateOk:
		name = "ok"
	case PingStateTimeout:
		name = "timeout"
	case PingStateError:
		name = "error"
	}
	return name
}

View File

@@ -1,299 +0,0 @@
package gocb
import (
"errors"
"fmt"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// wrappedError decorates an inner error with a contextual message while
// remaining unwrappable via errors.Unwrap/errors.Is/errors.As.
type wrappedError struct {
	Message    string
	InnerError error
}

// Error renders the error as "<message>: <inner error>".
func (e wrappedError) Error() string {
	return fmt.Sprintf("%s: %s", e.Message, e.InnerError)
}

// Unwrap exposes the wrapped cause.
func (e wrappedError) Unwrap() error {
	return e.InnerError
}

// wrapError attaches message as context around err.
func wrapError(err error, message string) error {
	return wrappedError{
		Message:    message,
		InnerError: err,
	}
}
type invalidArgumentsError struct {
message string
}
func (e invalidArgumentsError) Error() string {
return fmt.Sprintf("invalid arguments: %s", e.message)
}
func (e invalidArgumentsError) Unwrap() error {
return ErrInvalidArgument
}
func makeInvalidArgumentsError(message string) error {
return invalidArgumentsError{
message: message,
}
}
// Shared Error Definitions RFC#58@15
var (
// ErrTimeout occurs when an operation does not receive a response in a timely manner.
ErrTimeout = gocbcore.ErrTimeout
// ErrRequestCanceled occurs when an operation has been canceled.
ErrRequestCanceled = gocbcore.ErrRequestCanceled
// ErrInvalidArgument occurs when an invalid argument is provided for an operation.
ErrInvalidArgument = gocbcore.ErrInvalidArgument
// ErrServiceNotAvailable occurs when the requested service is not available.
ErrServiceNotAvailable = gocbcore.ErrServiceNotAvailable
// ErrInternalServerFailure occurs when the server encounters an internal server error.
ErrInternalServerFailure = gocbcore.ErrInternalServerFailure
// ErrAuthenticationFailure occurs when authentication has failed.
ErrAuthenticationFailure = gocbcore.ErrAuthenticationFailure
// ErrTemporaryFailure occurs when an operation has failed for a reason that is temporary.
ErrTemporaryFailure = gocbcore.ErrTemporaryFailure
// ErrParsingFailure occurs when a query has failed to be parsed by the server.
ErrParsingFailure = gocbcore.ErrParsingFailure
// ErrCasMismatch occurs when an operation has been performed with a cas value that does not the value on the server.
ErrCasMismatch = gocbcore.ErrCasMismatch
// ErrBucketNotFound occurs when the requested bucket could not be found.
ErrBucketNotFound = gocbcore.ErrBucketNotFound
// ErrCollectionNotFound occurs when the requested collection could not be found.
ErrCollectionNotFound = gocbcore.ErrCollectionNotFound
// ErrEncodingFailure occurs when encoding of a value failed.
ErrEncodingFailure = gocbcore.ErrEncodingFailure
// ErrDecodingFailure occurs when decoding of a value failed.
ErrDecodingFailure = gocbcore.ErrDecodingFailure
// ErrUnsupportedOperation occurs when an operation that is unsupported or unknown is performed against the server.
ErrUnsupportedOperation = gocbcore.ErrUnsupportedOperation
// ErrAmbiguousTimeout occurs when an operation does not receive a response in a timely manner for a reason that
//
ErrAmbiguousTimeout = gocbcore.ErrAmbiguousTimeout
// ErrAmbiguousTimeout occurs when an operation does not receive a response in a timely manner for a reason that
// it can be safely established that
ErrUnambiguousTimeout = gocbcore.ErrUnambiguousTimeout
// ErrFeatureNotAvailable occurs when an operation is performed on a bucket which does not support it.
ErrFeatureNotAvailable = gocbcore.ErrFeatureNotAvailable
// ErrScopeNotFound occurs when the requested scope could not be found.
ErrScopeNotFound = gocbcore.ErrScopeNotFound
// ErrIndexNotFound occurs when the requested index could not be found.
ErrIndexNotFound = gocbcore.ErrIndexNotFound
// ErrIndexExists occurs when creating an index that already exists.
ErrIndexExists = gocbcore.ErrIndexExists
)
// Key Value Error Definitions RFC#58@15
var (
// ErrDocumentNotFound occurs when the requested document could not be found.
ErrDocumentNotFound = gocbcore.ErrDocumentNotFound
// ErrDocumentUnretrievable occurs when GetAnyReplica cannot find the document on any replica.
ErrDocumentUnretrievable = gocbcore.ErrDocumentUnretrievable
// ErrDocumentLocked occurs when a mutation operation is attempted against a document that is locked.
ErrDocumentLocked = gocbcore.ErrDocumentLocked
// ErrValueTooLarge occurs when a document has gone over the maximum size allowed by the server.
ErrValueTooLarge = gocbcore.ErrValueTooLarge
// ErrDocumentExists occurs when an attempt is made to insert a document but a document with that key already exists.
ErrDocumentExists = gocbcore.ErrDocumentExists
// ErrValueNotJSON occurs when a sub-document operation is performed on a
// document which is not JSON.
ErrValueNotJSON = gocbcore.ErrValueNotJSON
// ErrDurabilityLevelNotAvailable occurs when an invalid durability level was requested.
ErrDurabilityLevelNotAvailable = gocbcore.ErrDurabilityLevelNotAvailable
// ErrDurabilityImpossible occurs when a request is performed with impossible
// durability level requirements.
ErrDurabilityImpossible = gocbcore.ErrDurabilityImpossible
// ErrDurabilityAmbiguous occurs when an SyncWrite does not complete in the specified
// time and the result is ambiguous.
ErrDurabilityAmbiguous = gocbcore.ErrDurabilityAmbiguous
// ErrDurableWriteInProgress occurs when an attempt is made to write to a key that has
// a SyncWrite pending.
ErrDurableWriteInProgress = gocbcore.ErrDurableWriteInProgress
// ErrDurableWriteReCommitInProgress occurs when an SyncWrite is being recommitted.
ErrDurableWriteReCommitInProgress = gocbcore.ErrDurableWriteReCommitInProgress
// ErrMutationLost occurs when a mutation was lost.
ErrMutationLost = gocbcore.ErrMutationLost
// ErrPathNotFound occurs when a sub-document operation targets a path
// which does not exist in the specified document.
ErrPathNotFound = gocbcore.ErrPathNotFound
// ErrPathMismatch occurs when a sub-document operation specifies a path
// which does not match the document structure (field access on an array).
ErrPathMismatch = gocbcore.ErrPathMismatch
// ErrPathInvalid occurs when a sub-document path could not be parsed.
ErrPathInvalid = gocbcore.ErrPathInvalid
// ErrPathTooBig occurs when a sub-document path is too big.
ErrPathTooBig = gocbcore.ErrPathTooBig
// ErrPathTooDeep occurs when an operation would cause a document to be
// nested beyond the depth limits allowed by the sub-document specification.
ErrPathTooDeep = gocbcore.ErrPathTooDeep
// ErrValueTooDeep occurs when a sub-document operation specifies a value
// which is deeper than the depth limits of the sub-document specification.
ErrValueTooDeep = gocbcore.ErrValueTooDeep
// ErrValueInvalid occurs when a sub-document operation could not insert.
ErrValueInvalid = gocbcore.ErrValueInvalid
// ErrDocumentNotJSON occurs when a sub-document operation is performed on a
// document which is not JSON.
ErrDocumentNotJSON = gocbcore.ErrDocumentNotJSON
// ErrNumberTooBig occurs when a sub-document operation is performed with
// a bad range.
ErrNumberTooBig = gocbcore.ErrNumberTooBig
// ErrDeltaInvalid occurs when a sub-document counter operation is performed
// and the specified delta is not valid.
ErrDeltaInvalid = gocbcore.ErrDeltaInvalid
// ErrPathExists occurs when a sub-document operation expects a path not
// to exists, but the path was found in the document.
ErrPathExists = gocbcore.ErrPathExists
// ErrXattrUnknownMacro occurs when an invalid macro value is specified.
ErrXattrUnknownMacro = gocbcore.ErrXattrUnknownMacro
// ErrXattrInvalidFlagCombo occurs when an invalid set of
// extended-attribute flags is passed to a sub-document operation.
ErrXattrInvalidFlagCombo = gocbcore.ErrXattrInvalidFlagCombo
// ErrXattrInvalidKeyCombo occurs when an invalid set of key operations
// are specified for a extended-attribute sub-document operation.
ErrXattrInvalidKeyCombo = gocbcore.ErrXattrInvalidKeyCombo
// ErrXattrUnknownVirtualAttribute occurs when an invalid virtual attribute is specified.
ErrXattrUnknownVirtualAttribute = gocbcore.ErrXattrUnknownVirtualAttribute
// ErrXattrCannotModifyVirtualAttribute occurs when a mutation is attempted upon
// a virtual attribute (which are immutable by definition).
ErrXattrCannotModifyVirtualAttribute = gocbcore.ErrXattrCannotModifyVirtualAttribute
// ErrXattrInvalidOrder occurs when a set key key operations are specified for a extended-attribute sub-document
// operation in the incorrect order.
ErrXattrInvalidOrder = gocbcore.ErrXattrInvalidOrder
)
// Query Error Definitions RFC#58@15
var (
// ErrPlanningFailure occurs when the query service was unable to create a query plan.
ErrPlanningFailure = gocbcore.ErrPlanningFailure
// ErrIndexFailure occurs when there was an issue with the index specified.
ErrIndexFailure = gocbcore.ErrIndexFailure
// ErrPreparedStatementFailure occurs when there was an issue with the prepared statement.
ErrPreparedStatementFailure = gocbcore.ErrPreparedStatementFailure
)
// Analytics Error Definitions RFC#58@15
var (
// ErrCompilationFailure occurs when there was an issue executing the analytics query because it could not
// be compiled.
ErrCompilationFailure = gocbcore.ErrCompilationFailure
// ErrJobQueueFull occurs when the analytics service job queue is full.
ErrJobQueueFull = gocbcore.ErrJobQueueFull
// ErrDatasetNotFound occurs when the analytics dataset requested could not be found.
ErrDatasetNotFound = gocbcore.ErrDatasetNotFound
// ErrDataverseNotFound occurs when the analytics dataverse requested could not be found.
ErrDataverseNotFound = gocbcore.ErrDataverseNotFound
// ErrDatasetExists occurs when creating an analytics dataset failed because it already exists.
ErrDatasetExists = gocbcore.ErrDatasetExists
// ErrDataverseExists occurs when creating an analytics dataverse failed because it already exists.
ErrDataverseExists = gocbcore.ErrDataverseExists
// ErrLinkNotFound occurs when the analytics link requested could not be found.
ErrLinkNotFound = gocbcore.ErrLinkNotFound
)
// Search Error Definitions RFC#58@15
var ()
// View Error Definitions RFC#58@15
var (
// ErrViewNotFound occurs when the view requested could not be found.
ErrViewNotFound = gocbcore.ErrViewNotFound
// ErrDesignDocumentNotFound occurs when the design document requested could not be found.
ErrDesignDocumentNotFound = gocbcore.ErrDesignDocumentNotFound
)
// Management Error Definitions RFC#58@15
var (
// ErrCollectionExists occurs when creating a collection failed because it already exists.
ErrCollectionExists = gocbcore.ErrCollectionExists
// ErrScopeExists occurs when creating a scope failed because it already exists.
ErrScopeExists = gocbcore.ErrScopeExists
// ErrUserNotFound occurs when the user requested could not be found.
ErrUserNotFound = gocbcore.ErrUserNotFound
// ErrGroupNotFound occurs when the group requested could not be found.
ErrGroupNotFound = gocbcore.ErrGroupNotFound
// ErrBucketExists occurs when creating a bucket failed because it already exists.
ErrBucketExists = gocbcore.ErrBucketExists
// ErrUserExists occurs when creating a user failed because it already exists.
ErrUserExists = gocbcore.ErrUserExists
// ErrBucketNotFlushable occurs when a bucket could not be flushed because flushing is not enabled.
ErrBucketNotFlushable = gocbcore.ErrBucketNotFlushable
)
// SDK specific error definitions
var (
// ErrOverload occurs when too many operations are dispatched and all queues are full.
ErrOverload = gocbcore.ErrOverload
// ErrNoResult occurs when no results are available to a query.
ErrNoResult = errors.New("no result was available")
)

View File

@@ -1,42 +0,0 @@
package gocb
import gocbcore "github.com/couchbase/gocbcore/v9"
// AnalyticsErrorDesc represents a specific error returned from the analytics service.
type AnalyticsErrorDesc struct {
Code uint32
Message string
}
func translateCoreAnalyticsErrorDesc(descs []gocbcore.AnalyticsErrorDesc) []AnalyticsErrorDesc {
descsOut := make([]AnalyticsErrorDesc, len(descs))
for descIdx, desc := range descs {
descsOut[descIdx] = AnalyticsErrorDesc{
Code: desc.Code,
Message: desc.Message,
}
}
return descsOut
}
// AnalyticsError is the error type of all analytics query errors.
// UNCOMMITTED: This API may change in the future.
type AnalyticsError struct {
InnerError error `json:"-"`
Statement string `json:"statement,omitempty"`
ClientContextID string `json:"client_context_id,omitempty"`
Errors []AnalyticsErrorDesc `json:"errors,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
RetryReasons []RetryReason `json:"retry_reasons,omitempty"`
RetryAttempts uint32 `json:"retry_attempts,omitempty"`
}
// Error returns the string representation of this error.
func (e AnalyticsError) Error() string {
return e.InnerError.Error() + " | " + serializeWrappedError(e)
}
// Unwrap returns the underlying cause for this error.
func (e AnalyticsError) Unwrap() error {
return e.InnerError
}

View File

@@ -1,72 +0,0 @@
package gocb
import (
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/pkg/errors"
)
// HTTPError is the error type of management HTTP errors.
// UNCOMMITTED: This API may change in the future.
type HTTPError struct {
InnerError error `json:"-"`
UniqueID string `json:"unique_id,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
RetryReasons []RetryReason `json:"retry_reasons,omitempty"`
RetryAttempts uint32 `json:"retry_attempts,omitempty"`
}
// Error returns the string representation of this error.
func (e HTTPError) Error() string {
return e.InnerError.Error() + " | " + serializeWrappedError(e)
}
// Unwrap returns the underlying cause for this error.
func (e HTTPError) Unwrap() error {
return e.InnerError
}
func makeGenericHTTPError(baseErr error, req *gocbcore.HTTPRequest, resp *gocbcore.HTTPResponse) error {
if baseErr == nil {
logErrorf("makeGenericHTTPError got an empty error")
baseErr = errors.New("unknown error")
}
err := HTTPError{
InnerError: baseErr,
}
if req != nil {
err.UniqueID = req.UniqueID
}
if resp != nil {
err.Endpoint = resp.Endpoint
}
return err
}
func makeGenericMgmtError(baseErr error, req *mgmtRequest, resp *mgmtResponse) error {
if baseErr == nil {
logErrorf("makeGenericMgmtError got an empty error")
baseErr = errors.New("unknown error")
}
err := HTTPError{
InnerError: baseErr,
}
if req != nil {
err.UniqueID = req.UniqueID
}
if resp != nil {
err.Endpoint = resp.Endpoint
}
return err
}
func makeMgmtBadStatusError(message string, req *mgmtRequest, resp *mgmtResponse) error {
return makeGenericMgmtError(errors.New(message), req, resp)
}

View File

@@ -1,34 +0,0 @@
package gocb
import "github.com/couchbase/gocbcore/v9/memd"
// KeyValueError wraps key-value errors that occur within the SDK.
// UNCOMMITTED: This API may change in the future.
type KeyValueError struct {
InnerError error `json:"-"`
StatusCode memd.StatusCode `json:"status_code,omitempty"`
BucketName string `json:"bucket,omitempty"`
ScopeName string `json:"scope,omitempty"`
CollectionName string `json:"collection,omitempty"`
CollectionID uint32 `json:"collection_id,omitempty"`
ErrorName string `json:"error_name,omitempty"`
ErrorDescription string `json:"error_description,omitempty"`
Opaque uint32 `json:"opaque,omitempty"`
Context string `json:"context,omitempty"`
Ref string `json:"ref,omitempty"`
RetryReasons []RetryReason `json:"retry_reasons,omitempty"`
RetryAttempts uint32 `json:"retry_attempts,omitempty"`
LastDispatchedTo string `json:"last_dispatched_to,omitempty"`
LastDispatchedFrom string `json:"last_dispatched_from,omitempty"`
LastConnectionID string `json:"last_connection_id,omitempty"`
}
// Error returns the string representation of a kv error.
func (e KeyValueError) Error() string {
return e.InnerError.Error() + " | " + serializeWrappedError(e)
}
// Unwrap returns the underlying reason for the error
func (e KeyValueError) Unwrap() error {
return e.InnerError
}

View File

@@ -1,42 +0,0 @@
package gocb
import gocbcore "github.com/couchbase/gocbcore/v9"
// QueryErrorDesc represents a specific error returned from the query service.
type QueryErrorDesc struct {
Code uint32
Message string
}
func translateCoreQueryErrorDesc(descs []gocbcore.N1QLErrorDesc) []QueryErrorDesc {
descsOut := make([]QueryErrorDesc, len(descs))
for descIdx, desc := range descs {
descsOut[descIdx] = QueryErrorDesc{
Code: desc.Code,
Message: desc.Message,
}
}
return descsOut
}
// QueryError is the error type of all query errors.
// UNCOMMITTED: This API may change in the future.
type QueryError struct {
InnerError error `json:"-"`
Statement string `json:"statement,omitempty"`
ClientContextID string `json:"client_context_id,omitempty"`
Errors []QueryErrorDesc `json:"errors,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
RetryReasons []RetryReason `json:"retry_reasons,omitempty"`
RetryAttempts uint32 `json:"retry_attempts,omitempty"`
}
// Error returns the string representation of this error.
func (e QueryError) Error() string {
return e.InnerError.Error() + " | " + serializeWrappedError(e)
}
// Unwrap returns the underlying cause for this error.
func (e QueryError) Unwrap() error {
return e.InnerError
}

View File

@@ -1,23 +0,0 @@
package gocb
// SearchError is the error type of all search query errors.
// UNCOMMITTED: This API may change in the future.
type SearchError struct {
	InnerError    error         `json:"-"`
	Query         interface{}   `json:"query,omitempty"`
	Endpoint      string        `json:"endpoint,omitempty"`
	RetryReasons  []RetryReason `json:"retry_reasons,omitempty"`
	RetryAttempts uint32        `json:"retry_attempts,omitempty"`
	ErrorText     string        `json:"error_text"`
	IndexName     string        `json:"index_name,omitempty"`
}

// Error returns the string representation of this error.
// NOTE(review): assumes InnerError is always non-nil — confirm at construction sites.
func (e SearchError) Error() string {
	return e.InnerError.Error() + " | " + serializeWrappedError(e)
}

// Unwrap returns the underlying cause for this error.
func (e SearchError) Unwrap() error {
	return e.InnerError
}

View File

@@ -1,87 +0,0 @@
package gocb
import (
"encoding/json"
"time"
)
// TimeoutError wraps timeout errors that occur within the SDK.
// UNCOMMITTED: This API may change in the future.
type TimeoutError struct {
	InnerError         error
	OperationID        string
	Opaque             string
	TimeObserved       time.Duration
	RetryReasons       []RetryReason
	RetryAttempts      uint32
	LastDispatchedTo   string
	LastDispatchedFrom string
	LastConnectionID   string
}

// timeoutError is the JSON wire form of TimeoutError: the observed time is
// encoded as integer microseconds ("t") and retry reasons as their string
// descriptions ("rr"). Field names are abbreviated single letters.
type timeoutError struct {
	InnerError         error    `json:"-"`
	OperationID        string   `json:"s,omitempty"`
	Opaque             string   `json:"i,omitempty"`
	TimeObserved       uint64   `json:"t,omitempty"`
	RetryReasons       []string `json:"rr,omitempty"`
	RetryAttempts      uint32   `json:"ra,omitempty"`
	LastDispatchedTo   string   `json:"r,omitempty"`
	LastDispatchedFrom string   `json:"l,omitempty"`
	LastConnectionID   string   `json:"c,omitempty"`
}
// MarshalJSON implements the Marshaler interface.
// The error is serialized via the private timeoutError shape: retry reasons
// become their string descriptions and the observed duration is encoded as
// integer microseconds.
func (err *TimeoutError) MarshalJSON() ([]byte, error) {
	descriptions := make([]string, 0, len(err.RetryReasons))
	for _, reason := range err.RetryReasons {
		descriptions = append(descriptions, reason.Description())
	}
	return json.Marshal(timeoutError{
		InnerError:         err.InnerError,
		OperationID:        err.OperationID,
		Opaque:             err.Opaque,
		TimeObserved:       uint64(err.TimeObserved / time.Microsecond),
		RetryReasons:       descriptions,
		RetryAttempts:      err.RetryAttempts,
		LastDispatchedTo:   err.LastDispatchedTo,
		LastDispatchedFrom: err.LastDispatchedFrom,
		LastConnectionID:   err.LastConnectionID,
	})
}
// UnmarshalJSON implements the Unmarshaler interface.
// The microsecond "t" field is converted back into a time.Duration. The
// InnerError and RetryReasons fields cannot be reconstructed from JSON and
// are left untouched.
func (err *TimeoutError) UnmarshalJSON(data []byte) error {
	var tErr *timeoutError
	if err := json.Unmarshal(data, &tErr); err != nil {
		return err
	}
	duration := time.Duration(tErr.TimeObserved) * time.Microsecond
	// Note that we cannot reasonably unmarshal the retry reasons
	err.OperationID = tErr.OperationID
	err.Opaque = tErr.Opaque
	err.TimeObserved = duration
	err.RetryAttempts = tErr.RetryAttempts
	err.LastDispatchedTo = tErr.LastDispatchedTo
	err.LastDispatchedFrom = tErr.LastDispatchedFrom
	err.LastConnectionID = tErr.LastConnectionID
	return nil
}

// Error returns the string representation of the timeout error.
// Unlike the other error types in this package, a nil InnerError is
// tolerated here (it can occur after UnmarshalJSON).
func (err TimeoutError) Error() string {
	if err.InnerError == nil {
		return serializeWrappedError(err)
	}
	return err.InnerError.Error() + " | " + serializeWrappedError(err)
}

// Unwrap returns the underlying reason for the error
func (err TimeoutError) Unwrap() error {
	return err.InnerError
}

View File

@@ -1,42 +0,0 @@
package gocb
import gocbcore "github.com/couchbase/gocbcore/v9"
// ViewErrorDesc represents a specific error returned from the views service.
type ViewErrorDesc struct {
	SourceNode string
	Message    string
}

// translateCoreViewErrorDesc converts gocbcore-level view error descriptions
// into their public SDK equivalents.
func translateCoreViewErrorDesc(descs []gocbcore.ViewQueryErrorDesc) []ViewErrorDesc {
	descsOut := make([]ViewErrorDesc, len(descs))
	for descIdx, desc := range descs {
		descsOut[descIdx] = ViewErrorDesc{
			SourceNode: desc.SourceNode,
			Message:    desc.Message,
		}
	}
	return descsOut
}

// ViewError is the error type of all view query errors.
// UNCOMMITTED: This API may change in the future.
type ViewError struct {
	InnerError         error           `json:"-"`
	DesignDocumentName string          `json:"design_document_name,omitempty"`
	ViewName           string          `json:"view_name,omitempty"`
	Errors             []ViewErrorDesc `json:"errors,omitempty"`
	Endpoint           string          `json:"endpoint,omitempty"`
	RetryReasons       []RetryReason   `json:"retry_reasons,omitempty"`
	RetryAttempts      uint32          `json:"retry_attempts,omitempty"`
}

// Error returns the string representation of this error.
// NOTE(review): assumes InnerError is always non-nil — confirm at construction sites.
func (e ViewError) Error() string {
	return e.InnerError.Error() + " | " + serializeWrappedError(e)
}

// Unwrap returns the underlying cause for this error.
func (e ViewError) Unwrap() error {
	return e.InnerError
}

View File

@@ -1,130 +0,0 @@
package gocb
import (
"encoding/json"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// serializeWrappedError renders err as its JSON representation for inclusion
// in Error() strings. On marshal failure the problem is logged and an empty
// string is returned.
func serializeWrappedError(err error) string {
	data, marshalErr := json.Marshal(err)
	if marshalErr == nil {
		return string(data)
	}
	logErrorf("failed to serialize error to json: %s", marshalErr.Error())
	return string(data)
}
// maybeEnhanceCoreErr converts known gocbcore error types into their public
// SDK equivalents, copying every field (and translating retry reasons and
// per-service error descriptions into SDK types). Unrecognized errors are
// returned unchanged.
func maybeEnhanceCoreErr(err error) error {
	// Key/value errors carry the full memcached context.
	if kvErr, ok := err.(*gocbcore.KeyValueError); ok {
		return &KeyValueError{
			InnerError:         kvErr.InnerError,
			StatusCode:         kvErr.StatusCode,
			BucketName:         kvErr.BucketName,
			ScopeName:          kvErr.ScopeName,
			CollectionName:     kvErr.CollectionName,
			CollectionID:       kvErr.CollectionID,
			ErrorName:          kvErr.ErrorName,
			ErrorDescription:   kvErr.ErrorDescription,
			Opaque:             kvErr.Opaque,
			Context:            kvErr.Context,
			Ref:                kvErr.Ref,
			RetryReasons:       translateCoreRetryReasons(kvErr.RetryReasons),
			RetryAttempts:      kvErr.RetryAttempts,
			LastDispatchedTo:   kvErr.LastDispatchedTo,
			LastDispatchedFrom: kvErr.LastDispatchedFrom,
			LastConnectionID:   kvErr.LastConnectionID,
		}
	}
	if viewErr, ok := err.(*gocbcore.ViewError); ok {
		return &ViewError{
			InnerError:         viewErr.InnerError,
			DesignDocumentName: viewErr.DesignDocumentName,
			ViewName:           viewErr.ViewName,
			Errors:             translateCoreViewErrorDesc(viewErr.Errors),
			Endpoint:           viewErr.Endpoint,
			RetryReasons:       translateCoreRetryReasons(viewErr.RetryReasons),
			RetryAttempts:      viewErr.RetryAttempts,
		}
	}
	if queryErr, ok := err.(*gocbcore.N1QLError); ok {
		return &QueryError{
			InnerError:      queryErr.InnerError,
			Statement:       queryErr.Statement,
			ClientContextID: queryErr.ClientContextID,
			Errors:          translateCoreQueryErrorDesc(queryErr.Errors),
			Endpoint:        queryErr.Endpoint,
			RetryReasons:    translateCoreRetryReasons(queryErr.RetryReasons),
			RetryAttempts:   queryErr.RetryAttempts,
		}
	}
	if analyticsErr, ok := err.(*gocbcore.AnalyticsError); ok {
		return &AnalyticsError{
			InnerError:      analyticsErr.InnerError,
			Statement:       analyticsErr.Statement,
			ClientContextID: analyticsErr.ClientContextID,
			Errors:          translateCoreAnalyticsErrorDesc(analyticsErr.Errors),
			Endpoint:        analyticsErr.Endpoint,
			RetryReasons:    translateCoreRetryReasons(analyticsErr.RetryReasons),
			RetryAttempts:   analyticsErr.RetryAttempts,
		}
	}
	if searchErr, ok := err.(*gocbcore.SearchError); ok {
		return &SearchError{
			InnerError:    searchErr.InnerError,
			Query:         searchErr.Query,
			Endpoint:      searchErr.Endpoint,
			RetryReasons:  translateCoreRetryReasons(searchErr.RetryReasons),
			RetryAttempts: searchErr.RetryAttempts,
			ErrorText:     searchErr.ErrorText,
			IndexName:     searchErr.IndexName,
		}
	}
	if httpErr, ok := err.(*gocbcore.HTTPError); ok {
		return &HTTPError{
			InnerError:    httpErr.InnerError,
			UniqueID:      httpErr.UniqueID,
			Endpoint:      httpErr.Endpoint,
			RetryReasons:  translateCoreRetryReasons(httpErr.RetryReasons),
			RetryAttempts: httpErr.RetryAttempts,
		}
	}
	if timeoutErr, ok := err.(*gocbcore.TimeoutError); ok {
		return &TimeoutError{
			InnerError:         timeoutErr.InnerError,
			OperationID:        timeoutErr.OperationID,
			Opaque:             timeoutErr.Opaque,
			TimeObserved:       timeoutErr.TimeObserved,
			RetryReasons:       translateCoreRetryReasons(timeoutErr.RetryReasons),
			RetryAttempts:      timeoutErr.RetryAttempts,
			LastDispatchedTo:   timeoutErr.LastDispatchedTo,
			LastDispatchedFrom: timeoutErr.LastDispatchedFrom,
			LastConnectionID:   timeoutErr.LastConnectionID,
		}
	}
	// Not a recognized core error type; pass it through untouched.
	return err
}
// maybeEnhanceKVErr enhances a key/value error with its SDK-level type.
// NOTE(review): the bucketName/scopeName/collName/docKey parameters are
// currently unused — presumably reserved for enriching the error with
// location context; confirm before removing.
func maybeEnhanceKVErr(err error, bucketName, scopeName, collName, docKey string) error {
	return maybeEnhanceCoreErr(err)
}
// maybeEnhanceCollKVErr enhances a key/value error using the location
// information carried by the collection.
//
// Fix: the scope and collection names were previously passed in swapped
// order — coll.Name() landed in maybeEnhanceKVErr's scopeName parameter and
// coll.ScopeName() in its collName parameter. Harmless today because the
// callee ignores those arguments, but a latent bug once they are used.
func maybeEnhanceCollKVErr(err error, bucket kvProvider, coll *Collection, docKey string) error {
	return maybeEnhanceKVErr(err, coll.bucketName(), coll.ScopeName(), coll.Name(), docKey)
}
// maybeEnhanceViewError enhances view query errors with their SDK-level types.
func maybeEnhanceViewError(err error) error {
	return maybeEnhanceCoreErr(err)
}

// maybeEnhanceQueryError enhances N1QL query errors with their SDK-level types.
func maybeEnhanceQueryError(err error) error {
	return maybeEnhanceCoreErr(err)
}

// maybeEnhanceAnalyticsError enhances analytics errors with their SDK-level types.
func maybeEnhanceAnalyticsError(err error) error {
	return maybeEnhanceCoreErr(err)
}

// maybeEnhanceSearchError enhances search query errors with their SDK-level types.
func maybeEnhanceSearchError(err error) error {
	return maybeEnhanceCoreErr(err)
}

View File

@@ -1,12 +0,0 @@
module github.com/couchbase/gocb/v2
require (
github.com/couchbase/gocbcore/v9 v9.0.4
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/uuid v1.1.1
github.com/pkg/errors v0.9.1
github.com/stretchr/objx v0.1.1 // indirect
github.com/stretchr/testify v1.5.1
)
go 1.13

View File

@@ -1,24 +0,0 @@
github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8=
github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -1,273 +0,0 @@
package gocb
import (
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/couchbase/gocbcore/v9/memd"
"github.com/pkg/errors"
)
// kvOpManager coordinates a single key/value operation against a collection:
// it accumulates the request parameters set via the Set* methods, bridges the
// asynchronous gocbcore callback into a synchronous Wait, and optionally
// drives observe-based durability after the mutation resolves.
type kvOpManager struct {
	parent *Collection

	// signal is buffered (size 1) and receives exactly one send from either
	// Resolve or Reject when the underlying operation completes.
	signal chan struct{}

	err           error
	wasResolved   bool
	mutationToken *MutationToken

	span            requestSpan
	documentID      string
	transcoder      Transcoder
	timeout         time.Duration
	deadline        time.Time
	bytes           []byte
	flags           uint32
	persistTo       uint
	replicateTo     uint
	durabilityLevel DurabilityLevel
	retryStrategy   *retryStrategyWrapper
	cancelCh        chan struct{}
}
// getTimeout returns the effective timeout for the operation: an explicitly
// configured timeout wins; otherwise the collection's KV timeout is used,
// upgraded to the durable-KV timeout when enhanced durability (beyond
// majority) or observe-based persistence is requested.
func (m *kvOpManager) getTimeout() time.Duration {
	if m.timeout > 0 {
		return m.timeout
	}
	if m.durabilityLevel > DurabilityLevelMajority || m.persistTo > 0 {
		return m.parent.timeoutsConfig.KVDurableTimeout
	}
	return m.parent.timeoutsConfig.KVTimeout
}
// SetDocumentID records the key of the document this operation targets.
func (m *kvOpManager) SetDocumentID(id string) {
	m.documentID = id
}

// SetCancelCh installs a channel that, when closed or signalled, aborts Wait.
func (m *kvOpManager) SetCancelCh(cancelCh chan struct{}) {
	m.cancelCh = cancelCh
}

// SetTimeout overrides the default operation timeout.
func (m *kvOpManager) SetTimeout(timeout time.Duration) {
	m.timeout = timeout
}

// SetTranscoder selects the transcoder used to encode the value; a nil
// argument falls back to the collection's default transcoder.
func (m *kvOpManager) SetTranscoder(transcoder Transcoder) {
	if transcoder == nil {
		transcoder = m.parent.transcoder
	}
	m.transcoder = transcoder
}

// SetValue encodes val with the configured transcoder, recording the encoded
// bytes and flags. Encoding is traced under an "encode" span. Errors are
// latched into m.err rather than returned; a transcoder must have been set
// first (see SetTranscoder).
func (m *kvOpManager) SetValue(val interface{}) {
	if m.err != nil {
		return
	}
	if m.transcoder == nil {
		m.err = errors.New("Expected a transcoder to be specified first")
		return
	}
	espan := m.parent.startKvOpTrace("encode", m.span)
	defer espan.Finish()
	bytes, flags, err := m.transcoder.Encode(val)
	if err != nil {
		m.err = err
		return
	}
	m.bytes = bytes
	m.flags = flags
}

// SetDuraOptions records the durability requirements. Observe-based
// durability (persistTo/replicateTo) requires mutation tokens to be enabled
// and is mutually exclusive with synchronous durability levels; violations
// are latched into m.err.
func (m *kvOpManager) SetDuraOptions(persistTo, replicateTo uint, level DurabilityLevel) {
	if persistTo != 0 || replicateTo != 0 {
		if !m.parent.useMutationTokens {
			m.err = makeInvalidArgumentsError("cannot use observe based durability without mutation tokens")
			return
		}
		if level > 0 {
			m.err = makeInvalidArgumentsError("cannot mix observe based durability and synchronous durability")
			return
		}
	}
	m.persistTo = persistTo
	m.replicateTo = replicateTo
	m.durabilityLevel = level
}
// SetRetryStrategy wraps and records the retry strategy for the operation;
// a nil argument keeps the collection's default wrapper.
func (m *kvOpManager) SetRetryStrategy(retryStrategy RetryStrategy) {
	wrapper := m.parent.retryStrategyWrapper
	if retryStrategy != nil {
		wrapper = newRetryStrategyWrapper(retryStrategy)
	}
	m.retryStrategy = wrapper
}

// Finish closes the operation's trace span.
func (m *kvOpManager) Finish() {
	m.span.Finish()
}

// TraceSpan returns the operation's trace span.
func (m *kvOpManager) TraceSpan() requestSpan {
	return m.span
}

// DocumentID returns the target document key as bytes.
func (m *kvOpManager) DocumentID() []byte {
	return []byte(m.documentID)
}

// CollectionName returns the name of the target collection.
func (m *kvOpManager) CollectionName() string {
	return m.parent.name()
}

// ScopeName returns the name of the target scope.
func (m *kvOpManager) ScopeName() string {
	return m.parent.ScopeName()
}

// BucketName returns the name of the target bucket.
func (m *kvOpManager) BucketName() string {
	return m.parent.bucketName()
}

// ValueBytes returns the transcoded value bytes set by SetValue.
func (m *kvOpManager) ValueBytes() []byte {
	return m.bytes
}

// ValueFlags returns the transcoder flags set by SetValue.
func (m *kvOpManager) ValueFlags() uint32 {
	return m.flags
}

// Transcoder returns the transcoder selected via SetTranscoder.
func (m *kvOpManager) Transcoder() Transcoder {
	return m.transcoder
}

// DurabilityLevel returns the durability level in memd wire form.
func (m *kvOpManager) DurabilityLevel() memd.DurabilityLevel {
	return memd.DurabilityLevel(m.durabilityLevel)
}

// DurabilityTimeout returns the server-side durability timeout, which is
// 90% of the operation timeout (timeout * 10 / 9 on the wire convention
// used here — NOTE(review): this is ~111% of the op timeout, not 90%;
// confirm the intended ratio).
func (m *kvOpManager) DurabilityTimeout() time.Duration {
	timeout := m.getTimeout()
	duraTimeout := timeout * 10 / 9
	return duraTimeout
}

// Deadline lazily computes and caches the absolute deadline from the
// effective timeout.
func (m *kvOpManager) Deadline() time.Time {
	if m.deadline.IsZero() {
		timeout := m.getTimeout()
		m.deadline = time.Now().Add(timeout)
	}
	return m.deadline
}

// RetryStrategy returns the wrapped retry strategy for the operation.
func (m *kvOpManager) RetryStrategy() *retryStrategyWrapper {
	return m.retryStrategy
}

// CheckReadyForOp reports any error latched by earlier Set* calls and
// verifies a timeout has been established.
func (m *kvOpManager) CheckReadyForOp() error {
	if m.err != nil {
		return m.err
	}
	if m.getTimeout() == 0 {
		return errors.New("op manager had no timeout specified")
	}
	return nil
}

// NeedsObserve reports whether observe-based durability polling is required.
func (m *kvOpManager) NeedsObserve() bool {
	return m.persistTo > 0 || m.replicateTo > 0
}

// EnhanceErr converts a core error into its SDK-level type, attaching the
// collection context. The bucket provider argument is deliberately nil here.
func (m *kvOpManager) EnhanceErr(err error) error {
	return maybeEnhanceCollKVErr(err, nil, m.parent, m.documentID)
}

// EnhanceMt wraps a core mutation token with the bucket name. A zero VbUUID
// is treated as "no token" and yields nil.
func (m *kvOpManager) EnhanceMt(token gocbcore.MutationToken) *MutationToken {
	if token.VbUUID != 0 {
		return &MutationToken{
			token:      token,
			bucketName: m.BucketName(),
		}
	}
	return nil
}

// Reject signals Wait that the operation failed; the callback is expected to
// have set any error state before calling this.
func (m *kvOpManager) Reject() {
	m.signal <- struct{}{}
}

// Resolve signals Wait that the operation succeeded, recording the mutation
// token for any subsequent durability polling.
func (m *kvOpManager) Resolve(token *MutationToken) {
	m.wasResolved = true
	m.mutationToken = token
	m.signal <- struct{}{}
}
// Wait blocks until the pending operation completes (Resolve or Reject fires
// the signal channel), honouring external cancellation via cancelCh, and then
// performs observe-based durability polling if it was requested and the
// mutation resolved successfully. A non-nil dispatch error is returned
// immediately without waiting.
func (m *kvOpManager) Wait(op gocbcore.PendingOp, err error) error {
	if err != nil {
		return err
	}
	// NOTE(review): when m.err was latched by an earlier Set* call the op is
	// cancelled here, but m.err itself is not returned from Wait — presumably
	// surfaced through the operation callback / Reject path; confirm.
	if m.err != nil {
		op.Cancel()
	}
	select {
	case <-m.signal:
		// Good to go
	case <-m.cancelCh:
		// External cancellation: cancel the op, then still wait for its
		// callback to fire the signal so the pending op is fully drained.
		op.Cancel()
		<-m.signal
	}
	if m.wasResolved && (m.persistTo > 0 || m.replicateTo > 0) {
		if m.mutationToken == nil {
			return errors.New("expected a mutation token")
		}
		return m.parent.waitForDurability(
			m.span,
			m.documentID,
			m.mutationToken.token,
			m.replicateTo,
			m.persistTo,
			m.Deadline(),
			m.cancelCh,
		)
	}
	return nil
}
// newKvOpManager creates a kvOpManager for a single operation on this
// collection, opening a trace span named after the operation. The signal
// channel is buffered so Resolve/Reject never block the gocbcore callback.
func (c *Collection) newKvOpManager(opName string, tracectx requestSpanContext) *kvOpManager {
	span := c.startKvOpTrace(opName, tracectx)
	return &kvOpManager{
		parent: c,
		signal: make(chan struct{}, 1),
		span:   span,
	}
}
// durationToExpiry converts a document TTL duration into the seconds-based
// expiry value used on the wire. A zero duration means "never expires"; any
// positive duration shorter than one second is clamped up to one second so
// truncation cannot accidentally produce a never-expiring document.
func durationToExpiry(dura time.Duration) uint32 {
	switch {
	case dura == 0:
		return 0
	case dura < time.Second:
		return 1
	default:
		return uint32(dura / time.Second)
	}
}

View File

@@ -1,148 +0,0 @@
package gocb
import (
"fmt"
"log"
"strings"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// LogLevel specifies the severity of a log message.
type LogLevel gocbcore.LogLevel

// Various logging levels (or subsystems) which can categorize the message.
// Currently these are ordered in decreasing severity.
const (
	LogError        LogLevel = LogLevel(gocbcore.LogError)
	LogWarn         LogLevel = LogLevel(gocbcore.LogWarn)
	LogInfo         LogLevel = LogLevel(gocbcore.LogInfo)
	LogDebug        LogLevel = LogLevel(gocbcore.LogDebug)
	LogTrace        LogLevel = LogLevel(gocbcore.LogTrace)
	LogSched        LogLevel = LogLevel(gocbcore.LogSched)
	LogMaxVerbosity LogLevel = LogLevel(gocbcore.LogMaxVerbosity)
)

// LogRedactLevel specifies the degree with which to redact the logs.
type LogRedactLevel uint

const (
	// RedactNone indicates to perform no redactions
	RedactNone LogRedactLevel = iota
	// RedactPartial indicates to redact all possible user-identifying information from logs.
	RedactPartial
	// RedactFull indicates to fully redact all possible identifying information from logs.
	RedactFull
)

// SetLogRedactionLevel specifies the level with which logs should be redacted.
// The setting is stored locally and forwarded to gocbcore.
func SetLogRedactionLevel(level LogRedactLevel) {
	globalLogRedactionLevel = level
	gocbcore.SetLogRedactionLevel(gocbcore.LogRedactLevel(level))
}

// Logger defines a logging interface. You can either use one of the default loggers
// (DefaultStdioLogger(), VerboseStdioLogger()) or implement your own.
type Logger interface {
	// Outputs logging information:
	// level is the verbosity level
	// offset is the position within the calling stack from which the message
	// originated. This is useful for contextual loggers which retrieve file/line
	// information.
	Log(level LogLevel, offset int, format string, v ...interface{}) error
}

var (
	// globalLogger is the Logger installed via SetLogger (nil disables logging).
	globalLogger Logger
	// globalLogRedactionLevel mirrors the level passed to SetLogRedactionLevel.
	globalLogRedactionLevel LogRedactLevel
)

// coreLogWrapper adapts a gocbcore.Logger to this package's Logger interface.
type coreLogWrapper struct {
	wrapped gocbcore.Logger
}

// Log forwards to the wrapped core logger, bumping the stack offset by 2 to
// account for the extra wrapper frames.
func (wrapper coreLogWrapper) Log(level LogLevel, offset int, format string, v ...interface{}) error {
	return wrapper.wrapped.Log(gocbcore.LogLevel(level), offset+2, format, v...)
}

// DefaultStdioLogger gets the default standard I/O logger.
// gocb.SetLogger(gocb.DefaultStdioLogger())
func DefaultStdioLogger() Logger {
	return &coreLogWrapper{
		wrapped: gocbcore.DefaultStdioLogger(),
	}
}
// VerboseStdioLogger is a more verbose level of DefaultStdioLogger(). Messages
// pertaining to the scheduling of ordinary commands (and their responses) will
// also be emitted.
// gocb.SetLogger(gocb.VerboseStdioLogger())
func VerboseStdioLogger() Logger {
	// Fix: return a *coreLogWrapper (not a value) so getCoreLogger's
	// *coreLogWrapper type assertion can unwrap it, matching
	// DefaultStdioLogger. Returning the value caused the wrapper to be
	// re-wrapped in a coreLogger, skewing the reported call-site offset
	// by an extra level of indirection.
	return &coreLogWrapper{
		wrapped: gocbcore.VerboseStdioLogger(),
	}
}
// coreLogger adapts this package's Logger interface back to gocbcore.Logger,
// the inverse of coreLogWrapper.
type coreLogger struct {
	wrapped Logger
}

// Log forwards to the wrapped SDK logger, bumping the stack offset by 2 to
// account for the extra wrapper frames.
func (wrapper coreLogger) Log(level gocbcore.LogLevel, offset int, format string, v ...interface{}) error {
	return wrapper.wrapped.Log(LogLevel(level), offset+2, format, v...)
}

// getCoreLogger converts a Logger into a gocbcore.Logger. A *coreLogWrapper
// is unwrapped directly to avoid double-wrapping (and double offset bumps);
// anything else is wrapped in a coreLogger adapter.
func getCoreLogger(logger Logger) gocbcore.Logger {
	typedLogger, isCoreLogger := logger.(*coreLogWrapper)
	if isCoreLogger {
		return typedLogger.wrapped
	}
	return &coreLogger{
		wrapped: logger,
	}
}

// SetLogger sets a logger to be used by the library. A logger can be obtained via
// the DefaultStdioLogger() or VerboseStdioLogger() functions. You can also implement
// your own logger using the Logger interface.
func SetLogger(logger Logger) {
	globalLogger = logger
	gocbcore.SetLogger(getCoreLogger(logger))
	// NOTE(review): re-applying the stored redaction level here is commented
	// out — confirm whether redaction set before SetLogger should propagate.
	// gocbcore.SetLogRedactionLevel(gocbcore.LogRedactLevel(globalLogRedactionLevel))
}

// logExf dispatches a formatted message to the global logger, if any.
// Logging failures are reported to the standard log package rather than
// propagated.
func logExf(level LogLevel, offset int, format string, v ...interface{}) {
	if globalLogger != nil {
		err := globalLogger.Log(level, offset+1, format, v...)
		if err != nil {
			log.Printf("Logger error occurred (%s)\n", err)
		}
	}
}

// logInfof logs at info level.
func logInfof(format string, v ...interface{}) {
	logExf(LogInfo, 1, format, v...)
}

// logDebugf logs at debug level.
func logDebugf(format string, v ...interface{}) {
	logExf(LogDebug, 1, format, v...)
}

// logSchedf logs at scheduling-trace level.
func logSchedf(format string, v ...interface{}) {
	logExf(LogSched, 1, format, v...)
}

// logWarnf logs at warning level.
func logWarnf(format string, v ...interface{}) {
	logExf(LogWarn, 1, format, v...)
}

// logErrorf logs at error level.
func logErrorf(format string, v ...interface{}) {
	logExf(LogError, 1, format, v...)
}
// reindentLog prefixes every line of a (possibly multi-line) message with the
// given indent string, including the first line.
func reindentLog(indent, message string) string {
	indented := strings.ReplaceAll(message, "\n", "\n"+indent)
	return fmt.Sprintf("%s%s", indent, indented)
}

View File

@@ -1,126 +0,0 @@
package gocb
import (
"io"
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// mgmtRequest describes an HTTP request to a cluster management endpoint.
type mgmtRequest struct {
	Service      ServiceType
	Method       string
	Path         string
	Body         []byte
	Headers      map[string]string
	ContentType  string
	IsIdempotent bool
	UniqueID     string
	// Timeout of zero falls back to the configured management timeout.
	Timeout       time.Duration
	RetryStrategy RetryStrategy
	parentSpan    requestSpanContext
}

// mgmtResponse is the subset of the HTTP response surfaced to management
// helpers. Callers own Body and must close it (see ensureBodyClosed).
type mgmtResponse struct {
	Endpoint   string
	StatusCode uint32
	Body       io.ReadCloser
}

// mgmtProvider abstracts the executor of management requests so both Cluster
// and Bucket can be used interchangeably by the management helpers.
type mgmtProvider interface {
	executeMgmtRequest(req mgmtRequest) (*mgmtResponse, error)
}
// executeMgmtRequest performs a management HTTP request at the cluster level,
// applying the configured management timeout and retry strategy defaults when
// the request does not specify its own.
func (c *Cluster) executeMgmtRequest(req mgmtRequest) (mgmtRespOut *mgmtResponse, errOut error) {
	timeout := req.Timeout
	if timeout == 0 {
		timeout = c.timeoutsConfig.ManagementTimeout
	}
	provider, err := c.getHTTPProvider()
	if err != nil {
		return nil, err
	}
	retryStrategy := c.retryStrategyWrapper
	if req.RetryStrategy != nil {
		retryStrategy = newRetryStrategyWrapper(req.RetryStrategy)
	}
	corereq := &gocbcore.HTTPRequest{
		Service:       gocbcore.ServiceType(req.Service),
		Method:        req.Method,
		Path:          req.Path,
		Body:          req.Body,
		Headers:       req.Headers,
		ContentType:   req.ContentType,
		IsIdempotent:  req.IsIdempotent,
		UniqueID:      req.UniqueID,
		Deadline:      time.Now().Add(timeout),
		RetryStrategy: retryStrategy,
		TraceContext:  req.parentSpan,
	}
	coreresp, err := provider.DoHTTPRequest(corereq)
	if err != nil {
		return nil, makeGenericHTTPError(err, corereq, coreresp)
	}
	resp := &mgmtResponse{
		Endpoint:   coreresp.Endpoint,
		StatusCode: uint32(coreresp.StatusCode),
		Body:       coreresp.Body,
	}
	return resp, nil
}

// executeMgmtRequest performs a management HTTP request at the bucket level.
// Mirrors the Cluster implementation but uses the bucket's connection
// manager.
// NOTE(review): unlike the Cluster variant, TraceContext is not forwarded
// here — confirm whether that omission is intentional.
func (b *Bucket) executeMgmtRequest(req mgmtRequest) (mgmtRespOut *mgmtResponse, errOut error) {
	timeout := req.Timeout
	if timeout == 0 {
		timeout = b.timeoutsConfig.ManagementTimeout
	}
	provider, err := b.connectionManager.getHTTPProvider()
	if err != nil {
		return nil, err
	}
	retryStrategy := b.retryStrategyWrapper
	if req.RetryStrategy != nil {
		retryStrategy = newRetryStrategyWrapper(req.RetryStrategy)
	}
	corereq := &gocbcore.HTTPRequest{
		Service:       gocbcore.ServiceType(req.Service),
		Method:        req.Method,
		Path:          req.Path,
		Body:          req.Body,
		Headers:       req.Headers,
		ContentType:   req.ContentType,
		IsIdempotent:  req.IsIdempotent,
		UniqueID:      req.UniqueID,
		Deadline:      time.Now().Add(timeout),
		RetryStrategy: retryStrategy,
	}
	coreresp, err := provider.DoHTTPRequest(corereq)
	if err != nil {
		return nil, makeGenericHTTPError(err, corereq, coreresp)
	}
	resp := &mgmtResponse{
		Endpoint:   coreresp.Endpoint,
		StatusCode: uint32(coreresp.StatusCode),
		Body:       coreresp.Body,
	}
	return resp, nil
}
// ensureBodyClosed closes a response body, logging (at debug level) rather
// than propagating any close failure.
func ensureBodyClosed(body io.ReadCloser) {
	if closeErr := body.Close(); closeErr != nil {
		logDebugf("Failed to close socket: %v", closeErr)
	}
}

View File

@@ -1,230 +0,0 @@
package gocb
import (
"time"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// httpProvider issues HTTP requests against the cluster.
type httpProvider interface {
	DoHTTPRequest(req *gocbcore.HTTPRequest) (*gocbcore.HTTPResponse, error)
}

// viewProvider executes view queries synchronously.
type viewProvider interface {
	ViewQuery(opts gocbcore.ViewQueryOptions) (viewRowReader, error)
}

// queryProvider executes N1QL queries (ad-hoc and prepared) synchronously.
type queryProvider interface {
	N1QLQuery(opts gocbcore.N1QLQueryOptions) (queryRowReader, error)
	PreparedN1QLQuery(opts gocbcore.N1QLQueryOptions) (queryRowReader, error)
}

// analyticsProvider executes analytics queries synchronously.
type analyticsProvider interface {
	AnalyticsQuery(opts gocbcore.AnalyticsQueryOptions) (analyticsRowReader, error)
}

// searchProvider executes full-text search queries synchronously.
type searchProvider interface {
	SearchQuery(opts gocbcore.SearchQueryOptions) (searchRowReader, error)
}

// waitUntilReadyProvider blocks until the cluster/bucket is ready or the
// deadline passes.
type waitUntilReadyProvider interface {
	WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions) error
}

// gocbcoreWaitUntilReadyProvider is the callback-based gocbcore shape adapted
// by waitUntilReadyProviderWrapper.
type gocbcoreWaitUntilReadyProvider interface {
	WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions,
		cb gocbcore.WaitUntilReadyCallback) (gocbcore.PendingOp, error)
}

// diagnosticsProvider exposes synchronous diagnostics and ping operations.
type diagnosticsProvider interface {
	Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error)
	Ping(opts gocbcore.PingOptions) (*gocbcore.PingResult, error)
}

// gocbcoreDiagnosticsProvider is the callback-based gocbcore shape adapted
// by diagnosticsProviderWrapper.
type gocbcoreDiagnosticsProvider interface {
	Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error)
	Ping(opts gocbcore.PingOptions, cb gocbcore.PingCallback) (gocbcore.PendingOp, error)
}
// waitUntilReadyProviderWrapper adapts gocbcore's callback-based
// WaitUntilReady into a blocking call via an asyncOpManager.
type waitUntilReadyProviderWrapper struct {
	provider gocbcoreWaitUntilReadyProvider
}

// WaitUntilReady blocks until the provider signals readiness or fails.
// Results are smuggled out of the callback through the named return.
func (wpw *waitUntilReadyProviderWrapper) WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions) (errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(wpw.provider.WaitUntilReady(deadline, opts, func(res *gocbcore.WaitUntilReadyResult, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}
	return
}

// diagnosticsProviderWrapper adapts gocbcore's diagnostics provider into the
// synchronous diagnosticsProvider interface.
type diagnosticsProviderWrapper struct {
	provider gocbcoreDiagnosticsProvider
}

// Diagnostics is already synchronous in gocbcore; pass straight through.
func (dpw *diagnosticsProviderWrapper) Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error) {
	return dpw.provider.Diagnostics(opts)
}

// Ping blocks until the callback-based core ping completes, returning its
// result via the named returns.
func (dpw *diagnosticsProviderWrapper) Ping(opts gocbcore.PingOptions) (pOut *gocbcore.PingResult, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(dpw.provider.Ping(opts, func(res *gocbcore.PingResult, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}
		pOut = res
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}
	return
}
// httpProviderWrapper adapts AgentGroup's callback-based HTTP API into the
// synchronous httpProvider interface.
type httpProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// DoHTTPRequest blocks until the core request completes, returning its
// response via the named returns.
func (hpw *httpProviderWrapper) DoHTTPRequest(req *gocbcore.HTTPRequest) (respOut *gocbcore.HTTPResponse, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(hpw.provider.DoHTTPRequest(req, func(res *gocbcore.HTTPResponse, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}
		respOut = res
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}
	return
}

// analyticsProviderWrapper adapts AgentGroup's callback-based analytics API
// into the synchronous analyticsProvider interface.
type analyticsProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// AnalyticsQuery blocks until the core query is dispatched, returning the
// row reader via the named returns.
func (apw *analyticsProviderWrapper) AnalyticsQuery(opts gocbcore.AnalyticsQueryOptions) (aOut analyticsRowReader, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(apw.provider.AnalyticsQuery(opts, func(reader *gocbcore.AnalyticsRowReader, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}
		aOut = reader
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}
	return
}
// queryProviderWrapper adapts AgentGroup's callback-based N1QL API into the
// synchronous queryProvider interface.
type queryProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// N1QLQuery blocks until the core ad-hoc query is dispatched, returning the
// row reader via the named returns.
func (apw *queryProviderWrapper) N1QLQuery(opts gocbcore.N1QLQueryOptions) (qOut queryRowReader, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(apw.provider.N1QLQuery(opts, func(reader *gocbcore.N1QLRowReader, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}
		qOut = reader
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}
	return
}

// PreparedN1QLQuery blocks until the core prepared query is dispatched,
// returning the row reader via the named returns.
func (apw *queryProviderWrapper) PreparedN1QLQuery(opts gocbcore.N1QLQueryOptions) (qOut queryRowReader, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(apw.provider.PreparedN1QLQuery(opts, func(reader *gocbcore.N1QLRowReader, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}
		qOut = reader
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}
	return
}

// searchProviderWrapper adapts AgentGroup's callback-based search API into
// the synchronous searchProvider interface.
type searchProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// SearchQuery blocks until the core search query is dispatched, returning the
// row reader via the named returns.
func (apw *searchProviderWrapper) SearchQuery(opts gocbcore.SearchQueryOptions) (sOut searchRowReader, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(apw.provider.SearchQuery(opts, func(reader *gocbcore.SearchRowReader, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}
		sOut = reader
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}
	return
}

// viewProviderWrapper adapts AgentGroup's callback-based view API into the
// synchronous viewProvider interface.
type viewProviderWrapper struct {
	provider *gocbcore.AgentGroup
}

// ViewQuery blocks until the core view query is dispatched, returning the
// row reader via the named returns.
func (apw *viewProviderWrapper) ViewQuery(opts gocbcore.ViewQueryOptions) (vOut viewRowReader, errOut error) {
	opm := newAsyncOpManager()
	err := opm.Wait(apw.provider.ViewQuery(opts, func(reader *gocbcore.ViewQueryRowReader, err error) {
		if err != nil {
			errOut = err
			opm.Reject()
			return
		}
		vOut = reader
		opm.Resolve()
	}))
	if err != nil {
		errOut = err
	}
	return
}

View File

@@ -1,144 +0,0 @@
package gocb
import (
"strconv"
"strings"
"time"
"github.com/google/uuid"
)
// QueryScanConsistency indicates the level of data consistency desired for a query.
type QueryScanConsistency uint

const (
	// QueryScanConsistencyNotBounded indicates no data consistency is required.
	QueryScanConsistencyNotBounded QueryScanConsistency = iota + 1
	// QueryScanConsistencyRequestPlus indicates that request-level data consistency is required.
	QueryScanConsistencyRequestPlus
)

// QueryOptions represents the options available when executing a query.
// ScanConsistency and ConsistentWith are mutually exclusive, as are
// PositionalParameters and NamedParameters (see toMap).
type QueryOptions struct {
	ScanConsistency QueryScanConsistency
	ConsistentWith  *MutationState
	Profile         QueryProfileMode
	// ScanCap is the maximum buffered channel size between the indexer connectionManager and the query service for index scans.
	ScanCap uint32
	// PipelineBatch controls the number of items execution operators can batch for Fetch from the KV.
	PipelineBatch uint32
	// PipelineCap controls the maximum number of items each execution operator can buffer between various operators.
	PipelineCap uint32
	// ScanWait is how long the indexer is allowed to wait until it can satisfy ScanConsistency/ConsistentWith criteria.
	ScanWait time.Duration
	// Readonly marks the query as not performing mutations.
	Readonly bool
	// MaxParallelism is the maximum number of index partitions, for computing aggregation in parallel.
	MaxParallelism uint32
	// ClientContextID provides a unique ID for this query which can be used matching up requests between connectionManager and
	// server. If not provided will be assigned a uuid value.
	ClientContextID      string
	PositionalParameters []interface{}
	NamedParameters      map[string]interface{}
	// Metrics requests query metrics in the response.
	Metrics bool
	// Raw provides a way to provide extra parameters in the request body for the query.
	Raw map[string]interface{}
	// Adhoc, when false, allows the SDK to use prepared statements for the query.
	Adhoc         bool
	Timeout       time.Duration
	RetryStrategy RetryStrategy
	parentSpan    requestSpanContext
}
// toMap flattens the query options into the key/value form expected in the
// body of a N1QL request, validating mutually-exclusive settings
// (ScanConsistency vs ConsistentWith, positional vs named parameters) and
// generating a client context ID when none was supplied.
func (opts *QueryOptions) toMap() (map[string]interface{}, error) {
	execOpts := make(map[string]interface{})
	if opts.ScanConsistency != 0 && opts.ConsistentWith != nil {
		return nil, makeInvalidArgumentsError("ScanConsistency and ConsistentWith must be used exclusively")
	}
	if opts.ScanConsistency != 0 {
		if opts.ScanConsistency == QueryScanConsistencyNotBounded {
			execOpts["scan_consistency"] = "not_bounded"
		} else if opts.ScanConsistency == QueryScanConsistencyRequestPlus {
			execOpts["scan_consistency"] = "request_plus"
		} else {
			return nil, makeInvalidArgumentsError("Unexpected consistency option")
		}
	}
	if opts.ConsistentWith != nil {
		execOpts["scan_consistency"] = "at_plus"
		execOpts["scan_vectors"] = opts.ConsistentWith
	}
	if opts.Profile != "" {
		execOpts["profile"] = opts.Profile
	}
	if opts.Readonly {
		execOpts["readonly"] = opts.Readonly
	}
	if opts.PositionalParameters != nil && opts.NamedParameters != nil {
		return nil, makeInvalidArgumentsError("Positional and named parameters must be used exclusively")
	}
	if opts.PositionalParameters != nil {
		execOpts["args"] = opts.PositionalParameters
	}
	if opts.NamedParameters != nil {
		for key, value := range opts.NamedParameters {
			// Named parameters are sent with a leading "$".
			if !strings.HasPrefix(key, "$") {
				key = "$" + key
			}
			execOpts[key] = value
		}
	}
	// Numeric tuning knobs are transmitted as decimal strings.
	if opts.ScanCap != 0 {
		execOpts["scan_cap"] = strconv.FormatUint(uint64(opts.ScanCap), 10)
	}
	if opts.PipelineBatch != 0 {
		execOpts["pipeline_batch"] = strconv.FormatUint(uint64(opts.PipelineBatch), 10)
	}
	if opts.PipelineCap != 0 {
		execOpts["pipeline_cap"] = strconv.FormatUint(uint64(opts.PipelineCap), 10)
	}
	if opts.ScanWait > 0 {
		execOpts["scan_wait"] = opts.ScanWait.String()
	}
	if opts.Raw != nil {
		for k, v := range opts.Raw {
			execOpts[k] = v
		}
	}
	if opts.MaxParallelism > 0 {
		execOpts["max_parallelism"] = strconv.FormatUint(uint64(opts.MaxParallelism), 10)
	}
	if !opts.Metrics {
		execOpts["metrics"] = false
	}
	if opts.ClientContextID == "" {
		// Fix: previously this stored the raw uuid.UUID value; every other
		// path stores a string, so normalize to the canonical string form.
		execOpts["client_context_id"] = uuid.New().String()
	} else {
		execOpts["client_context_id"] = opts.ClientContextID
	}
	return execOpts, nil
}

View File

@@ -1,350 +0,0 @@
package gocb
import (
"encoding/json"
"time"
"github.com/pkg/errors"
)
// Result is the base type for the return types of operations
type Result struct {
	cas Cas
}

// Cas returns the cas of the result.
func (d *Result) Cas() Cas {
	return d.cas
}

// GetResult is the return type of Get operations.
type GetResult struct {
	Result
	transcoder Transcoder
	flags      uint32
	contents   []byte
	// expiry is only populated when the caller requested expiry; see Expiry.
	expiry *time.Duration
}

// Content assigns the value of the result into the valuePtr using default decoding.
func (d *GetResult) Content(valuePtr interface{}) error {
	return d.transcoder.Decode(d.contents, d.flags, valuePtr)
}

// Expiry returns the expiry value for the result if it available. Note that a nil
// pointer indicates that the Expiry was fetched, while a valid pointer to a zero
// Duration indicates that the document will never expire.
func (d *GetResult) Expiry() *time.Duration {
	return d.expiry
}
// fromFullProjection populates the result from a single full-document subdoc
// fetch, pruning the document down to only the requested fields.
func (d *GetResult) fromFullProjection(ops []LookupInSpec, result *LookupInResult, fields []string) error {
	if len(fields) == 0 {
		// This is a special case where user specified a full doc fetch with expiration.
		d.contents = result.contents[0].data
		return nil
	}

	if len(result.contents) != 1 {
		return makeInvalidArgumentsError("fromFullProjection should only be called with 1 subdoc result")
	}

	full := result.contents[0]
	if full.err != nil {
		return full.err
	}

	var doc map[string]interface{}
	if err := json.Unmarshal(full.data, &doc); err != nil {
		return err
	}

	// Rebuild a document containing only the projected fields.
	pruned := make(map[string]interface{})
	for _, field := range fields {
		d.set(d.pathParts(field), pruned, doc[field])
	}

	encoded, err := json.Marshal(pruned)
	if err != nil {
		return errors.Wrap(err, "could not marshal result contents")
	}
	d.contents = encoded

	return nil
}

// fromSubDoc assembles the result document from the individual subdoc lookups.
func (d *GetResult) fromSubDoc(ops []LookupInSpec, result *LookupInResult) error {
	doc := make(map[string]interface{})
	for i, op := range ops {
		if err := result.contents[i].err; err != nil {
			// We return the first error that has occurred, this will be
			// a SubDocument error and will indicate the real reason.
			return err
		}
		d.set(d.pathParts(op.path), doc, result.contents[i].data)
	}

	encoded, err := json.Marshal(doc)
	if err != nil {
		return errors.Wrap(err, "could not marshal result contents")
	}
	d.contents = encoded

	return nil
}
// subdocPath is a single element of a projection path: a map key, optionally
// flagged as indexing into an array (i.e. it was followed by "[...]" syntax).
type subdocPath struct {
	path    string
	isArray bool
}

// pathParts splits a projection path string such as "a.b[0].c" into its
// components. Array indices themselves are not preserved; a component
// followed by brackets is simply marked isArray.
func (d *GetResult) pathParts(pathStr string) []subdocPath {
	pathLen := len(pathStr)
	var elemIdx int // start index of the component currently being scanned
	var i int
	var paths []subdocPath
	for i < pathLen {
		ch := pathStr[i]
		i++
		if ch == '[' {
			// opening of an array
			isArr := false
			arrayStart := i
			// Scan forward to the closing ']' (or a '.' on malformed input).
			for i < pathLen {
				arrCh := pathStr[i]
				if arrCh == ']' {
					isArr = true
					i++
					break
				} else if arrCh == '.' {
					i++
					break
				}
				i++
			}
			if isArr {
				// Component name is everything before the '['.
				paths = append(paths, subdocPath{path: pathStr[elemIdx : arrayStart-1], isArray: true})
			} else {
				paths = append(paths, subdocPath{path: pathStr[elemIdx:i], isArray: false})
			}
			elemIdx = i
			// Skip a '.' separator immediately following the brackets.
			if i < pathLen && pathStr[i] == '.' {
				i++
				elemIdx = i
			}
		} else if ch == '.' {
			paths = append(paths, subdocPath{path: pathStr[elemIdx : i-1]})
			elemIdx = i
		}
	}
	if elemIdx != i {
		// this should only ever be an object as an array would have ended in [...]
		paths = append(paths, subdocPath{path: pathStr[elemIdx:i]})
	}

	return paths
}
// set recursively assigns value into content at the location described by
// paths, creating intermediate maps/arrays as needed. content may be either a
// map[string]interface{} or a []interface{}; the (possibly reallocated)
// container is returned because appending to a slice can replace it.
func (d *GetResult) set(paths []subdocPath, content interface{}, value interface{}) interface{} {
	path := paths[0]
	if len(paths) == 1 {
		// Leaf element: write the value itself.
		if path.isArray {
			arr := make([]interface{}, 0)
			arr = append(arr, value)

			if _, ok := content.(map[string]interface{}); ok {
				content.(map[string]interface{})[path.path] = arr
			} else if _, ok := content.([]interface{}); ok {
				content = append(content.([]interface{}), arr)
			} else {
				logErrorf("Projections encountered a non-array or object content assigning an array")
			}
		} else {
			if _, ok := content.([]interface{}); ok {
				elem := make(map[string]interface{})
				elem[path.path] = value
				content = append(content.([]interface{}), elem)
			} else {
				content.(map[string]interface{})[path.path] = value
			}
		}
		return content
	}

	// Interior element: descend, creating the child container first.
	if path.isArray {
		if _, ok := content.([]interface{}); ok {
			var m []interface{}
			content = append(content.([]interface{}), d.set(paths[1:], m, value))
			return content
		} else if cMap, ok := content.(map[string]interface{}); ok {
			cMap[path.path] = make([]interface{}, 0)
			cMap[path.path] = d.set(paths[1:], cMap[path.path], value)
			return content
		} else {
			logErrorf("Projections encountered a non-array or object content assigning an array")
		}
	} else {
		if arr, ok := content.([]interface{}); ok {
			m := make(map[string]interface{})
			m[path.path] = make(map[string]interface{})
			content = append(arr, m)
			d.set(paths[1:], m[path.path], value)
			return content
		}
		cMap, ok := content.(map[string]interface{})
		if !ok {
			// This shouldn't be reachable (content is always a map or slice),
			// but previously execution fell through to the nil-map assignment
			// below, which would panic. Log and bail out instead.
			logErrorf("Failed to assert projection content to a map")
			return content
		}
		cMap[path.path] = make(map[string]interface{})
		return d.set(paths[1:], cMap[path.path], value)
	}

	return content
}
// LookupInResult is the return type for LookupIn.
type LookupInResult struct {
	Result
	contents []lookupInPartial // one entry per lookup op, in builder order
}
// lookupInPartial holds the outcome of a single subdoc lookup operation:
// its raw JSON payload and any per-operation error.
type lookupInPartial struct {
	data json.RawMessage
	err  error
}

// as decodes the payload into valuePtr. A nil valuePtr only surfaces the
// stored error; a *[]byte receives the raw JSON without decoding.
func (p *lookupInPartial) as(valuePtr interface{}) error {
	if p.err != nil {
		return p.err
	}

	if valuePtr == nil {
		return nil
	}

	if raw, ok := valuePtr.(*[]byte); ok {
		*raw = p.data
		return nil
	}

	return json.Unmarshal(p.data, valuePtr)
}

// exists reports whether the operation completed without error.
func (p *lookupInPartial) exists() bool {
	return p.as(nil) == nil
}
// ContentAt retrieves the value of the operation by its index. The index is the position of
// the operation as it was added to the builder.
func (lir *LookupInResult) ContentAt(idx uint, valuePtr interface{}) error {
	if idx < uint(len(lir.contents)) {
		return lir.contents[idx].as(valuePtr)
	}
	return makeInvalidArgumentsError("invalid index")
}

// Exists verifies that the item at idx exists.
func (lir *LookupInResult) Exists(idx uint) bool {
	return idx < uint(len(lir.contents)) && lir.contents[idx].exists()
}
// ExistsResult is the return type of Exist operations.
type ExistsResult struct {
	Result
	docExists bool
}

// Exists returns whether or not the document exists.
func (er *ExistsResult) Exists() bool {
	return er.docExists
}

// MutationResult is the return type of any store related operations. It contains Cas and mutation tokens.
type MutationResult struct {
	Result
	mt *MutationToken
}

// MutationToken returns the mutation token belonging to an operation.
func (res MutationResult) MutationToken() *MutationToken {
	return res.mt
}
// MutateInResult is the return type of any mutate in related operations.
// It contains Cas, mutation tokens and any returned content.
type MutateInResult struct {
	MutationResult
	contents []mutateInPartial
}

// mutateInPartial holds the raw JSON returned by a single mutate-in op.
type mutateInPartial struct {
	data json.RawMessage
}

// as decodes the payload into valuePtr; nil skips decoding and a *[]byte
// receives the raw JSON bytes.
func (p *mutateInPartial) as(valuePtr interface{}) error {
	if valuePtr == nil {
		return nil
	}

	if raw, ok := valuePtr.(*[]byte); ok {
		*raw = p.data
		return nil
	}

	return json.Unmarshal(p.data, valuePtr)
}

// ContentAt retrieves the value of the operation by its index. The index is the position of
// the operation as it was added to the builder.
func (mir MutateInResult) ContentAt(idx uint, valuePtr interface{}) error {
	return mir.contents[idx].as(valuePtr)
}
// CounterResult is the return type of counter operations.
type CounterResult struct {
	MutationResult
	content uint64
}

// MutationToken returns the mutation token belonging to an operation.
func (cr CounterResult) MutationToken() *MutationToken {
	return cr.mt
}

// Cas returns the Cas value for a document following an operation.
func (cr CounterResult) Cas() Cas {
	return cr.cas
}

// Content returns the new value for the counter document.
func (cr CounterResult) Content() uint64 {
	return cr.content
}

// GetReplicaResult is the return type of GetReplica operations.
type GetReplicaResult struct {
	GetResult
	isReplica bool
}

// IsReplica returns whether or not this result came from a replica server.
func (grr *GetReplicaResult) IsReplica() bool {
	return grr.isReplica
}

View File

@@ -1,194 +0,0 @@
package gocb
import (
"time"
"github.com/couchbase/gocbcore/v9"
)
// translateCoreRetryReasons converts gocbcore retry reasons into their gocb
// equivalents, logging and dropping any that cannot be converted.
func translateCoreRetryReasons(reasons []gocbcore.RetryReason) []RetryReason {
	var out []RetryReason
	for _, coreReason := range reasons {
		reason, ok := coreReason.(RetryReason)
		if !ok {
			logErrorf("Failed to assert gocbcore retry reason to gocb retry reason: %v", coreReason)
			continue
		}
		out = append(out, reason)
	}

	return out
}
// RetryRequest is a request that can possibly be retried.
type RetryRequest interface {
	RetryAttempts() uint32
	Identifier() string
	Idempotent() bool
	RetryReasons() []RetryReason
}

// wrappedRetryRequest adapts a gocbcore.RetryRequest to the gocb
// RetryRequest interface.
type wrappedRetryRequest struct {
	req gocbcore.RetryRequest
}

func (w *wrappedRetryRequest) RetryAttempts() uint32 {
	return w.req.RetryAttempts()
}

func (w *wrappedRetryRequest) Identifier() string {
	return w.req.Identifier()
}

func (w *wrappedRetryRequest) Idempotent() bool {
	return w.req.Idempotent()
}

func (w *wrappedRetryRequest) RetryReasons() []RetryReason {
	return translateCoreRetryReasons(w.req.RetryReasons())
}
// RetryReason represents the reason for an operation possibly being retried.
type RetryReason interface {
	AllowsNonIdempotentRetry() bool
	AlwaysRetry() bool
	Description() string
}

// The exported retry reasons below re-expose the gocbcore reasons as gocb
// RetryReason values so callers never need to import gocbcore directly.
var (
	// UnknownRetryReason indicates that the operation failed for an unknown reason.
	UnknownRetryReason = RetryReason(gocbcore.UnknownRetryReason)
	// SocketNotAvailableRetryReason indicates that the operation failed because the underlying socket was not available.
	SocketNotAvailableRetryReason = RetryReason(gocbcore.SocketNotAvailableRetryReason)
	// ServiceNotAvailableRetryReason indicates that the operation failed because the requested service was not available.
	ServiceNotAvailableRetryReason = RetryReason(gocbcore.ServiceNotAvailableRetryReason)
	// NodeNotAvailableRetryReason indicates that the operation failed because the requested node was not available.
	NodeNotAvailableRetryReason = RetryReason(gocbcore.NodeNotAvailableRetryReason)
	// KVNotMyVBucketRetryReason indicates that the operation failed because it was sent to the wrong node for the vbucket.
	KVNotMyVBucketRetryReason = RetryReason(gocbcore.KVNotMyVBucketRetryReason)
	// KVCollectionOutdatedRetryReason indicates that the operation failed because the collection ID on the request is outdated.
	KVCollectionOutdatedRetryReason = RetryReason(gocbcore.KVCollectionOutdatedRetryReason)
	// KVErrMapRetryReason indicates that the operation failed for an unsupported reason but the KV error map indicated
	// that the operation can be retried.
	KVErrMapRetryReason = RetryReason(gocbcore.KVErrMapRetryReason)
	// KVLockedRetryReason indicates that the operation failed because the document was locked.
	KVLockedRetryReason = RetryReason(gocbcore.KVLockedRetryReason)
	// KVTemporaryFailureRetryReason indicates that the operation failed because of a temporary failure.
	KVTemporaryFailureRetryReason = RetryReason(gocbcore.KVTemporaryFailureRetryReason)
	// KVSyncWriteInProgressRetryReason indicates that the operation failed because a sync write is in progress.
	KVSyncWriteInProgressRetryReason = RetryReason(gocbcore.KVSyncWriteInProgressRetryReason)
	// KVSyncWriteRecommitInProgressRetryReason indicates that the operation failed because a sync write recommit is in progress.
	KVSyncWriteRecommitInProgressRetryReason = RetryReason(gocbcore.KVSyncWriteRecommitInProgressRetryReason)
	// ServiceResponseCodeIndicatedRetryReason indicates that the operation failed and the service responded stating that
	// the request should be retried.
	ServiceResponseCodeIndicatedRetryReason = RetryReason(gocbcore.ServiceResponseCodeIndicatedRetryReason)
	// SocketCloseInFlightRetryReason indicates that the operation failed because the socket was closed whilst the operation
	// was in flight.
	SocketCloseInFlightRetryReason = RetryReason(gocbcore.SocketCloseInFlightRetryReason)
	// CircuitBreakerOpenRetryReason indicates that the operation failed because the circuit breaker on the connection
	// was open.
	CircuitBreakerOpenRetryReason = RetryReason(gocbcore.CircuitBreakerOpenRetryReason)
	// QueryIndexNotFoundRetryReason indicates that the operation failed due to a missing query index.
	QueryIndexNotFoundRetryReason = RetryReason(gocbcore.QueryIndexNotFoundRetryReason)
	// QueryPreparedStatementFailureRetryReason indicates that the operation failed due to a prepared statement failure.
	QueryPreparedStatementFailureRetryReason = RetryReason(gocbcore.QueryPreparedStatementFailureRetryReason)
	// AnalyticsTemporaryFailureRetryReason indicates that an analytics operation failed due to a temporary failure.
	AnalyticsTemporaryFailureRetryReason = RetryReason(gocbcore.AnalyticsTemporaryFailureRetryReason)
	// SearchTooManyRequestsRetryReason indicates that a search operation failed due to too many requests.
	SearchTooManyRequestsRetryReason = RetryReason(gocbcore.SearchTooManyRequestsRetryReason)
)
// RetryAction is used by a RetryStrategy to calculate the duration to wait before retrying an operation.
// Returning a value of 0 indicates to not retry.
type RetryAction interface {
	Duration() time.Duration
}

// NoRetryRetryAction represents an action that indicates to not retry.
type NoRetryRetryAction struct {
}

// Duration is the length of time to wait before retrying an operation.
func (a *NoRetryRetryAction) Duration() time.Duration {
	return 0
}

// WithDurationRetryAction represents an action that indicates to retry with a given duration.
type WithDurationRetryAction struct {
	WithDuration time.Duration
}

// Duration is the length of time to wait before retrying an operation.
func (a *WithDurationRetryAction) Duration() time.Duration {
	return a.WithDuration
}
// RetryStrategy is to determine if an operation should be retried, and if so how long to wait before retrying.
type RetryStrategy interface {
	RetryAfter(req RetryRequest, reason RetryReason) RetryAction
}

// newRetryStrategyWrapper wraps a gocb RetryStrategy so it can be handed to gocbcore.
func newRetryStrategyWrapper(strategy RetryStrategy) *retryStrategyWrapper {
	return &retryStrategyWrapper{wrapped: strategy}
}

type retryStrategyWrapper struct {
	wrapped RetryStrategy
}

// RetryAfter calculates and returns a RetryAction describing how long to wait before retrying an operation.
func (rs *retryStrategyWrapper) RetryAfter(req gocbcore.RetryRequest, reason gocbcore.RetryReason) gocbcore.RetryAction {
	action := rs.wrapped.RetryAfter(&wrappedRetryRequest{req: req}, RetryReason(reason))
	return gocbcore.RetryAction(action)
}

// BackoffCalculator defines how backoff durations will be calculated by the retry API.
type BackoffCalculator func(retryAttempts uint32) time.Duration

// BestEffortRetryStrategy represents a strategy that will keep retrying until it succeeds (or the caller times out
// the request).
type BestEffortRetryStrategy struct {
	BackoffCalculator BackoffCalculator
}

// NewBestEffortRetryStrategy returns a new BestEffortRetryStrategy which will use the supplied calculator function
// to calculate retry durations. If calculator is nil then a controlled backoff will be used.
func NewBestEffortRetryStrategy(calculator BackoffCalculator) *BestEffortRetryStrategy {
	if calculator == nil {
		calculator = BackoffCalculator(gocbcore.ExponentialBackoff(1*time.Millisecond, 500*time.Millisecond, 2))
	}

	return &BestEffortRetryStrategy{BackoffCalculator: calculator}
}

// RetryAfter calculates and returns a RetryAction describing how long to wait before retrying an operation.
func (rs *BestEffortRetryStrategy) RetryAfter(req RetryRequest, reason RetryReason) RetryAction {
	// Non-idempotent requests are only retried when the reason permits it.
	if req.Idempotent() || reason.AllowsNonIdempotentRetry() {
		return &WithDurationRetryAction{WithDuration: rs.BackoffCalculator(req.RetryAttempts())}
	}

	return &NoRetryRetryAction{}
}

View File

@@ -1,55 +0,0 @@
package gocb
// Scope represents a single scope within a bucket.
// VOLATILE: This API is subject to change at any time.
type Scope struct {
	scopeName string
	bucket    *Bucket

	timeoutsConfig kvTimeoutsConfig

	transcoder           Transcoder
	retryStrategyWrapper *retryStrategyWrapper
	tracer               requestTracer

	useMutationTokens bool

	getKvProvider func() (kvProvider, error)
}

// newScope builds a Scope that inherits its configuration from the owning bucket.
func newScope(bucket *Bucket, scopeName string) *Scope {
	scope := &Scope{
		scopeName: scopeName,
		bucket:    bucket,
		timeoutsConfig: kvTimeoutsConfig{
			KVTimeout:        bucket.timeoutsConfig.KVTimeout,
			KVDurableTimeout: bucket.timeoutsConfig.KVDurableTimeout,
		},
		transcoder:           bucket.transcoder,
		retryStrategyWrapper: bucket.retryStrategyWrapper,
		tracer:               bucket.tracer,
		useMutationTokens:    bucket.useMutationTokens,
		getKvProvider:        bucket.getKvProvider,
	}
	return scope
}

// Name returns the name of the scope.
func (s *Scope) Name() string {
	return s.scopeName
}

// BucketName returns the name of the bucket to which this collection belongs.
// UNCOMMITTED: This API may change in the future.
func (s *Scope) BucketName() string {
	return s.bucket.Name()
}

// Collection returns an instance of a collection.
// VOLATILE: This API is subject to change at any time.
func (s *Scope) Collection(collectionName string) *Collection {
	return newCollection(s, collectionName)
}

View File

@@ -1,110 +0,0 @@
package search
import (
"encoding/json"
)
// Facet represents a facet for a search query.
type Facet interface {
}

type termFacetData struct {
	Field string `json:"field,omitempty"`
	Size  uint64 `json:"size,omitempty"`
}

// TermFacet is a search term facet.
type TermFacet struct {
	data termFacetData
}

// MarshalJSON marshals this facet to JSON for the search REST API.
func (f TermFacet) MarshalJSON() ([]byte, error) {
	return json.Marshal(f.data)
}

// NewTermFacet creates a new TermFacet
func NewTermFacet(field string, size uint64) *TermFacet {
	facet := &TermFacet{}
	facet.data.Field = field
	facet.data.Size = size
	return facet
}
type numericFacetRange struct {
	Name  string  `json:"name,omitempty"`
	Start float64 `json:"start,omitempty"`
	End   float64 `json:"end,omitempty"`
}

type numericFacetData struct {
	Field         string              `json:"field,omitempty"`
	Size          uint64              `json:"size,omitempty"`
	NumericRanges []numericFacetRange `json:"numeric_ranges,omitempty"`
}

// NumericFacet is a search numeric range facet.
type NumericFacet struct {
	data numericFacetData
}

// MarshalJSON marshals this facet to JSON for the search REST API.
func (f NumericFacet) MarshalJSON() ([]byte, error) {
	return json.Marshal(f.data)
}

// AddRange adds a new range to this numeric range facet.
func (f *NumericFacet) AddRange(name string, start, end float64) *NumericFacet {
	newRange := numericFacetRange{
		Name:  name,
		Start: start,
		End:   end,
	}
	f.data.NumericRanges = append(f.data.NumericRanges, newRange)
	return f
}

// NewNumericFacet creates a new numeric range facet.
func NewNumericFacet(field string, size uint64) *NumericFacet {
	facet := &NumericFacet{}
	facet.data.Field = field
	facet.data.Size = size
	return facet
}
type dateFacetRange struct {
	Name  string `json:"name,omitempty"`
	Start string `json:"start,omitempty"`
	End   string `json:"end,omitempty"`
}

type dateFacetData struct {
	Field      string           `json:"field,omitempty"`
	Size       uint64           `json:"size,omitempty"`
	DateRanges []dateFacetRange `json:"date_ranges,omitempty"`
}

// DateFacet is a search date range facet.
type DateFacet struct {
	data dateFacetData
}

// MarshalJSON marshals this facet to JSON for the search REST API.
func (f DateFacet) MarshalJSON() ([]byte, error) {
	return json.Marshal(f.data)
}

// AddRange adds a new range to this date range facet.
func (f *DateFacet) AddRange(name string, start, end string) *DateFacet {
	newRange := dateFacetRange{
		Name:  name,
		Start: start,
		End:   end,
	}
	f.data.DateRanges = append(f.data.DateRanges, newRange)
	return f
}

// NewDateFacet creates a new date range facet.
func NewDateFacet(field string, size uint64) *DateFacet {
	facet := &DateFacet{}
	facet.data.Field = field
	facet.data.Size = size
	return facet
}

View File

@@ -1,620 +0,0 @@
package search
import "encoding/json"
// Query represents a search query.
type Query interface {
}

// searchQueryBase is the shared implementation for all search query types;
// each concrete query stores its REST payload in the options map.
type searchQueryBase struct {
	options map[string]interface{}
}

func newSearchQueryBase() searchQueryBase {
	return searchQueryBase{options: map[string]interface{}{}}
}

// MarshalJSON marshals this query to JSON for the search REST API.
func (q searchQueryBase) MarshalJSON() ([]byte, error) {
	return json.Marshal(q.options)
}
// MatchQuery represents a search match query.
type MatchQuery struct {
	searchQueryBase
}

// NewMatchQuery creates a new MatchQuery.
func NewMatchQuery(match string) *MatchQuery {
	mq := &MatchQuery{newSearchQueryBase()}
	mq.options["match"] = match
	return mq
}

// Field specifies the field for this query.
func (mq *MatchQuery) Field(field string) *MatchQuery {
	mq.options["field"] = field
	return mq
}

// Analyzer specifies the analyzer to use for this query.
func (mq *MatchQuery) Analyzer(analyzer string) *MatchQuery {
	mq.options["analyzer"] = analyzer
	return mq
}

// PrefixLength specifies the prefix length from this query.
func (mq *MatchQuery) PrefixLength(length uint64) *MatchQuery {
	mq.options["prefix_length"] = length
	return mq
}

// Fuzziness specifies the fuzziness for this query.
func (mq *MatchQuery) Fuzziness(fuzziness uint64) *MatchQuery {
	mq.options["fuzziness"] = fuzziness
	return mq
}

// Boost specifies the boost for this query.
func (mq *MatchQuery) Boost(boost float32) *MatchQuery {
	mq.options["boost"] = boost
	return mq
}

// MatchPhraseQuery represents a search match phrase query.
type MatchPhraseQuery struct {
	searchQueryBase
}

// NewMatchPhraseQuery creates a new MatchPhraseQuery
func NewMatchPhraseQuery(phrase string) *MatchPhraseQuery {
	pq := &MatchPhraseQuery{newSearchQueryBase()}
	pq.options["match_phrase"] = phrase
	return pq
}

// Field specifies the field for this query.
func (pq *MatchPhraseQuery) Field(field string) *MatchPhraseQuery {
	pq.options["field"] = field
	return pq
}

// Analyzer specifies the analyzer to use for this query.
func (pq *MatchPhraseQuery) Analyzer(analyzer string) *MatchPhraseQuery {
	pq.options["analyzer"] = analyzer
	return pq
}

// Boost specifies the boost for this query.
func (pq *MatchPhraseQuery) Boost(boost float32) *MatchPhraseQuery {
	pq.options["boost"] = boost
	return pq
}

// RegexpQuery represents a search regular expression query.
type RegexpQuery struct {
	searchQueryBase
}

// NewRegexpQuery creates a new RegexpQuery.
func NewRegexpQuery(regexp string) *RegexpQuery {
	rq := &RegexpQuery{newSearchQueryBase()}
	rq.options["regexp"] = regexp
	return rq
}

// Field specifies the field for this query.
func (rq *RegexpQuery) Field(field string) *RegexpQuery {
	rq.options["field"] = field
	return rq
}

// Boost specifies the boost for this query.
func (rq *RegexpQuery) Boost(boost float32) *RegexpQuery {
	rq.options["boost"] = boost
	return rq
}
// QueryStringQuery represents a search string query.
type QueryStringQuery struct {
	searchQueryBase
}

// NewQueryStringQuery creates a new StringQuery.
func NewQueryStringQuery(query string) *QueryStringQuery {
	sq := &QueryStringQuery{newSearchQueryBase()}
	sq.options["query"] = query
	return sq
}

// Boost specifies the boost for this query.
func (sq *QueryStringQuery) Boost(boost float32) *QueryStringQuery {
	sq.options["boost"] = boost
	return sq
}

// NumericRangeQuery represents a search numeric range query.
type NumericRangeQuery struct {
	searchQueryBase
}

// NewNumericRangeQuery creates a new NumericRangeQuery.
func NewNumericRangeQuery() *NumericRangeQuery {
	return &NumericRangeQuery{newSearchQueryBase()}
}

// Min specifies the minimum value and inclusiveness for this range query.
func (nq *NumericRangeQuery) Min(min float32, inclusive bool) *NumericRangeQuery {
	nq.options["min"] = min
	nq.options["inclusive_min"] = inclusive
	return nq
}

// Max specifies the maximum value and inclusiveness for this range query.
func (nq *NumericRangeQuery) Max(max float32, inclusive bool) *NumericRangeQuery {
	nq.options["max"] = max
	nq.options["inclusive_max"] = inclusive
	return nq
}

// Field specifies the field for this query.
func (nq *NumericRangeQuery) Field(field string) *NumericRangeQuery {
	nq.options["field"] = field
	return nq
}

// Boost specifies the boost for this query.
func (nq *NumericRangeQuery) Boost(boost float32) *NumericRangeQuery {
	nq.options["boost"] = boost
	return nq
}

// DateRangeQuery represents a search date range query.
type DateRangeQuery struct {
	searchQueryBase
}

// NewDateRangeQuery creates a new DateRangeQuery.
func NewDateRangeQuery() *DateRangeQuery {
	return &DateRangeQuery{newSearchQueryBase()}
}

// Start specifies the start value and inclusiveness for this range query.
func (dq *DateRangeQuery) Start(start string, inclusive bool) *DateRangeQuery {
	dq.options["start"] = start
	dq.options["inclusive_start"] = inclusive
	return dq
}

// End specifies the end value and inclusiveness for this range query.
func (dq *DateRangeQuery) End(end string, inclusive bool) *DateRangeQuery {
	dq.options["end"] = end
	dq.options["inclusive_end"] = inclusive
	return dq
}

// DateTimeParser specifies which date time string parser to use.
func (dq *DateRangeQuery) DateTimeParser(parser string) *DateRangeQuery {
	dq.options["datetime_parser"] = parser
	return dq
}

// Field specifies the field for this query.
func (dq *DateRangeQuery) Field(field string) *DateRangeQuery {
	dq.options["field"] = field
	return dq
}

// Boost specifies the boost for this query.
func (dq *DateRangeQuery) Boost(boost float32) *DateRangeQuery {
	dq.options["boost"] = boost
	return dq
}
// ConjunctionQuery represents a search conjunction query.
type ConjunctionQuery struct {
	searchQueryBase
}

// NewConjunctionQuery creates a new ConjunctionQuery.
func NewConjunctionQuery(queries ...Query) *ConjunctionQuery {
	cq := &ConjunctionQuery{newSearchQueryBase()}
	cq.options["conjuncts"] = []Query{}
	return cq.And(queries...)
}

// And adds new predicate queries to this conjunction query.
func (cq *ConjunctionQuery) And(queries ...Query) *ConjunctionQuery {
	cq.options["conjuncts"] = append(cq.options["conjuncts"].([]Query), queries...)
	return cq
}

// Boost specifies the boost for this query.
func (cq *ConjunctionQuery) Boost(boost float32) *ConjunctionQuery {
	cq.options["boost"] = boost
	return cq
}

// DisjunctionQuery represents a search disjunction query.
type DisjunctionQuery struct {
	searchQueryBase
}

// NewDisjunctionQuery creates a new DisjunctionQuery.
func NewDisjunctionQuery(queries ...Query) *DisjunctionQuery {
	dq := &DisjunctionQuery{newSearchQueryBase()}
	dq.options["disjuncts"] = []Query{}
	return dq.Or(queries...)
}

// Or adds new predicate queries to this disjunction query.
func (dq *DisjunctionQuery) Or(queries ...Query) *DisjunctionQuery {
	dq.options["disjuncts"] = append(dq.options["disjuncts"].([]Query), queries...)
	return dq
}

// Boost specifies the boost for this query.
func (dq *DisjunctionQuery) Boost(boost float32) *DisjunctionQuery {
	dq.options["boost"] = boost
	return dq
}
// booleanQueryData is the REST payload shape for a boolean query.
type booleanQueryData struct {
	Must    *ConjunctionQuery `json:"must,omitempty"`
	Should  *DisjunctionQuery `json:"should,omitempty"`
	MustNot *DisjunctionQuery `json:"must_not,omitempty"`
	Boost   float32           `json:"boost,omitempty"`
}

// BooleanQuery represents a search boolean query.
type BooleanQuery struct {
	data      booleanQueryData
	shouldMin uint32 // injected as "min" into the should clause at marshal time
}

// NewBooleanQuery creates a new BooleanQuery.
func NewBooleanQuery() *BooleanQuery {
	q := &BooleanQuery{}
	return q
}

// Must specifies a query which must match. A non-conjunction query is
// wrapped in a new ConjunctionQuery.
func (q *BooleanQuery) Must(query Query) *BooleanQuery {
	switch val := query.(type) {
	case ConjunctionQuery:
		q.data.Must = &val
	case *ConjunctionQuery:
		q.data.Must = val
	default:
		q.data.Must = NewConjunctionQuery(val)
	}
	return q
}

// Should specifies a query which should match. A non-disjunction query is
// wrapped in a new DisjunctionQuery.
func (q *BooleanQuery) Should(query Query) *BooleanQuery {
	switch val := query.(type) {
	case DisjunctionQuery:
		q.data.Should = &val
	case *DisjunctionQuery:
		q.data.Should = val
	default:
		q.data.Should = NewDisjunctionQuery(val)
	}
	return q
}

// MustNot specifies a query which must not match. A non-disjunction query is
// wrapped in a new DisjunctionQuery.
func (q *BooleanQuery) MustNot(query Query) *BooleanQuery {
	switch val := query.(type) {
	case DisjunctionQuery:
		q.data.MustNot = &val
	case *DisjunctionQuery:
		q.data.MustNot = val
	default:
		q.data.MustNot = NewDisjunctionQuery(val)
	}
	return q
}

// ShouldMin specifies the minimum value before the should query will boost.
func (q *BooleanQuery) ShouldMin(min uint32) *BooleanQuery {
	q.shouldMin = min
	return q
}

// Boost specifies the boost for this query.
func (q *BooleanQuery) Boost(boost float32) *BooleanQuery {
	q.data.Boost = boost
	return q
}

// MarshalJSON marshal's this query to JSON for the search REST API.
// It temporarily injects shouldMin as "min" into the should query's options
// and removes it afterwards; note this mutates shared state, so marshaling
// the same query concurrently is not safe.
func (q *BooleanQuery) MarshalJSON() ([]byte, error) {
	if q.data.Should != nil {
		q.data.Should.options["min"] = q.shouldMin
	}
	bytes, err := json.Marshal(q.data)
	if q.data.Should != nil {
		delete(q.data.Should.options, "min")
	}
	return bytes, err
}
// WildcardQuery represents a search wildcard query.
type WildcardQuery struct {
	searchQueryBase
}

// NewWildcardQuery creates a new WildcardQuery.
func NewWildcardQuery(wildcard string) *WildcardQuery {
	wq := &WildcardQuery{newSearchQueryBase()}
	wq.options["wildcard"] = wildcard
	return wq
}

// Field specifies the field for this query.
func (wq *WildcardQuery) Field(field string) *WildcardQuery {
	wq.options["field"] = field
	return wq
}

// Boost specifies the boost for this query.
func (wq *WildcardQuery) Boost(boost float32) *WildcardQuery {
	wq.options["boost"] = boost
	return wq
}

// DocIDQuery represents a search document id query.
type DocIDQuery struct {
	searchQueryBase
}

// NewDocIDQuery creates a new DocIDQuery.
func NewDocIDQuery(ids ...string) *DocIDQuery {
	dq := &DocIDQuery{newSearchQueryBase()}
	dq.options["ids"] = []string{}
	return dq.AddDocIds(ids...)
}

// AddDocIds adds addition document ids to this query.
func (dq *DocIDQuery) AddDocIds(ids ...string) *DocIDQuery {
	dq.options["ids"] = append(dq.options["ids"].([]string), ids...)
	return dq
}

// Field specifies the field for this query.
func (dq *DocIDQuery) Field(field string) *DocIDQuery {
	dq.options["field"] = field
	return dq
}

// Boost specifies the boost for this query.
func (dq *DocIDQuery) Boost(boost float32) *DocIDQuery {
	dq.options["boost"] = boost
	return dq
}

// BooleanFieldQuery represents a search boolean field query.
type BooleanFieldQuery struct {
	searchQueryBase
}

// NewBooleanFieldQuery creates a new BooleanFieldQuery.
func NewBooleanFieldQuery(val bool) *BooleanFieldQuery {
	bq := &BooleanFieldQuery{newSearchQueryBase()}
	bq.options["bool"] = val
	return bq
}

// Field specifies the field for this query.
func (bq *BooleanFieldQuery) Field(field string) *BooleanFieldQuery {
	bq.options["field"] = field
	return bq
}

// Boost specifies the boost for this query.
func (bq *BooleanFieldQuery) Boost(boost float32) *BooleanFieldQuery {
	bq.options["boost"] = boost
	return bq
}
// TermQuery represents a search term query.
type TermQuery struct {
	searchQueryBase
}

// NewTermQuery creates a new TermQuery.
func NewTermQuery(term string) *TermQuery {
	tq := &TermQuery{newSearchQueryBase()}
	tq.options["term"] = term
	return tq
}

// Field specifies the field for this query.
func (tq *TermQuery) Field(field string) *TermQuery {
	tq.options["field"] = field
	return tq
}

// PrefixLength specifies the prefix length from this query.
func (tq *TermQuery) PrefixLength(length uint64) *TermQuery {
	tq.options["prefix_length"] = length
	return tq
}

// Fuzziness specifies the fuzziness for this query.
func (tq *TermQuery) Fuzziness(fuzziness uint64) *TermQuery {
	tq.options["fuzziness"] = fuzziness
	return tq
}

// Boost specifies the boost for this query.
func (tq *TermQuery) Boost(boost float32) *TermQuery {
	tq.options["boost"] = boost
	return tq
}

// PhraseQuery represents a search phrase query.
type PhraseQuery struct {
	searchQueryBase
}

// NewPhraseQuery creates a new PhraseQuery.
func NewPhraseQuery(terms ...string) *PhraseQuery {
	pq := &PhraseQuery{newSearchQueryBase()}
	pq.options["terms"] = terms
	return pq
}

// Field specifies the field for this query.
func (pq *PhraseQuery) Field(field string) *PhraseQuery {
	pq.options["field"] = field
	return pq
}

// Boost specifies the boost for this query.
func (pq *PhraseQuery) Boost(boost float32) *PhraseQuery {
	pq.options["boost"] = boost
	return pq
}

// PrefixQuery represents a search prefix query.
type PrefixQuery struct {
	searchQueryBase
}

// NewPrefixQuery creates a new PrefixQuery.
func NewPrefixQuery(prefix string) *PrefixQuery {
	prq := &PrefixQuery{newSearchQueryBase()}
	prq.options["prefix"] = prefix
	return prq
}

// Field specifies the field for this query.
func (prq *PrefixQuery) Field(field string) *PrefixQuery {
	prq.options["field"] = field
	return prq
}

// Boost specifies the boost for this query.
func (prq *PrefixQuery) Boost(boost float32) *PrefixQuery {
	prq.options["boost"] = boost
	return prq
}
// MatchAllQuery represents a search match all query.
type MatchAllQuery struct {
searchQueryBase
}
// NewMatchAllQuery creates a new MatchAllQuery.
func NewMatchAllQuery() *MatchAllQuery {
q := &MatchAllQuery{newSearchQueryBase()}
q.options["match_all"] = nil
return q
}
// MatchNoneQuery represents a search query that matches no documents.
type MatchNoneQuery struct {
	searchQueryBase
}

// NewMatchNoneQuery builds a MatchNoneQuery.
func NewMatchNoneQuery() *MatchNoneQuery {
	query := &MatchNoneQuery{newSearchQueryBase()}
	query.options["match_none"] = nil
	return query
}
// TermRangeQuery represents a search term range query.
type TermRangeQuery struct {
	searchQueryBase
}

// NewTermRangeQuery builds a TermRangeQuery over the given term.
func NewTermRangeQuery(term string) *TermRangeQuery {
	query := &TermRangeQuery{newSearchQueryBase()}
	query.options["term"] = term
	return query
}

// Field restricts this query to the named field.
func (q *TermRangeQuery) Field(field string) *TermRangeQuery {
	q.options["field"] = field
	return q
}

// Min sets the lower bound of the range and whether it is inclusive.
func (q *TermRangeQuery) Min(min string, inclusive bool) *TermRangeQuery {
	q.options["min"] = min
	q.options["inclusive_min"] = inclusive
	return q
}

// Max sets the upper bound of the range and whether it is inclusive.
func (q *TermRangeQuery) Max(max string, inclusive bool) *TermRangeQuery {
	q.options["max"] = max
	q.options["inclusive_max"] = inclusive
	return q
}

// Boost sets the boost factor applied to this query's score.
func (q *TermRangeQuery) Boost(boost float32) *TermRangeQuery {
	q.options["boost"] = boost
	return q
}
// GeoDistanceQuery represents a search geographical distance query.
type GeoDistanceQuery struct {
	searchQueryBase
}

// NewGeoDistanceQuery builds a GeoDistanceQuery centered on the given
// longitude/latitude pair with the given distance expression.
func NewGeoDistanceQuery(lon, lat float64, distance string) *GeoDistanceQuery {
	query := &GeoDistanceQuery{newSearchQueryBase()}
	// Location is encoded [lon, lat], matching the search REST API.
	query.options["location"] = []float64{lon, lat}
	query.options["distance"] = distance
	return query
}

// Field restricts this query to the named field.
func (q *GeoDistanceQuery) Field(field string) *GeoDistanceQuery {
	q.options["field"] = field
	return q
}

// Boost sets the boost factor applied to this query's score.
func (q *GeoDistanceQuery) Boost(boost float32) *GeoDistanceQuery {
	q.options["boost"] = boost
	return q
}
// GeoBoundingBoxQuery represents a search geographical bounding box query.
type GeoBoundingBoxQuery struct {
	searchQueryBase
}

// NewGeoBoundingBoxQuery builds a GeoBoundingBoxQuery from the top-left
// and bottom-right corners, each given as a longitude/latitude pair.
func NewGeoBoundingBoxQuery(tlLon, tlLat, brLon, brLat float64) *GeoBoundingBoxQuery {
	query := &GeoBoundingBoxQuery{newSearchQueryBase()}
	query.options["top_left"] = []float64{tlLon, tlLat}
	query.options["bottom_right"] = []float64{brLon, brLat}
	return query
}

// Field restricts this query to the named field.
func (q *GeoBoundingBoxQuery) Field(field string) *GeoBoundingBoxQuery {
	q.options["field"] = field
	return q
}

// Boost sets the boost factor applied to this query's score.
func (q *GeoBoundingBoxQuery) Boost(boost float32) *GeoBoundingBoxQuery {
	q.options["boost"] = boost
	return q
}

View File

@@ -1,123 +0,0 @@
package search
import (
"encoding/json"
)
// Sort represents a sorting clause for a search query.
type Sort interface {
}

// searchSortBase holds the option map shared by all sort implementations.
type searchSortBase struct {
	options map[string]interface{}
}

func newSearchSortBase() searchSortBase {
	return searchSortBase{
		options: map[string]interface{}{},
	}
}

// MarshalJSON marshals this sort to JSON for the search REST API.
func (s searchSortBase) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.options)
}
// SearchSortScore represents a sort by relevance score.
type SearchSortScore struct {
	searchSortBase
}

// NewSearchSortScore builds a SearchSortScore.
func NewSearchSortScore() *SearchSortScore {
	srt := &SearchSortScore{newSearchSortBase()}
	srt.options["by"] = "score"
	return srt
}

// Descending sets whether results are ordered descending.
func (s *SearchSortScore) Descending(descending bool) *SearchSortScore {
	s.options["desc"] = descending
	return s
}
// SearchSortID represents a sort by document ID.
type SearchSortID struct {
	searchSortBase
}

// NewSearchSortID builds a SearchSortID.
// (The upstream comment incorrectly said "SearchSortScore".)
func NewSearchSortID() *SearchSortID {
	srt := &SearchSortID{newSearchSortBase()}
	srt.options["by"] = "id"
	return srt
}

// Descending sets whether results are ordered descending.
func (s *SearchSortID) Descending(descending bool) *SearchSortID {
	s.options["desc"] = descending
	return s
}
// SearchSortField represents a sort by a document field.
type SearchSortField struct {
	searchSortBase
}

// NewSearchSortField builds a SearchSortField over the named field.
func NewSearchSortField(field string) *SearchSortField {
	srt := &SearchSortField{newSearchSortBase()}
	srt.options["by"] = "field"
	srt.options["field"] = field
	return srt
}

// Type sets the field sort type.
func (s *SearchSortField) Type(value string) *SearchSortField {
	s.options["type"] = value
	return s
}

// Mode sets the field sort mode.
func (s *SearchSortField) Mode(mode string) *SearchSortField {
	s.options["mode"] = mode
	return s
}

// Missing sets the behaviour for documents missing the field.
func (s *SearchSortField) Missing(missing string) *SearchSortField {
	s.options["missing"] = missing
	return s
}

// Descending sets whether results are ordered descending.
func (s *SearchSortField) Descending(descending bool) *SearchSortField {
	s.options["desc"] = descending
	return s
}
// SearchSortGeoDistance represents a sort by geographic distance.
type SearchSortGeoDistance struct {
	searchSortBase
}

// NewSearchSortGeoDistance builds a SearchSortGeoDistance over the named
// field, measured from the given longitude/latitude pair.
func NewSearchSortGeoDistance(field string, lon, lat float64) *SearchSortGeoDistance {
	srt := &SearchSortGeoDistance{newSearchSortBase()}
	srt.options["by"] = "geo_distance"
	srt.options["field"] = field
	srt.options["location"] = []float64{lon, lat}
	return srt
}

// Unit sets the distance unit used for sorting.
func (s *SearchSortGeoDistance) Unit(unit string) *SearchSortGeoDistance {
	s.options["unit"] = unit
	return s
}

// Descending sets whether results are ordered descending.
func (s *SearchSortGeoDistance) Descending(descending bool) *SearchSortGeoDistance {
	s.options["desc"] = descending
	return s
}

View File

@@ -1,138 +0,0 @@
package gocb
import (
"time"
cbsearch "github.com/couchbase/gocb/v2/search"
)
// SearchHighlightStyle indicates the type of highlighting to use for a search query.
type SearchHighlightStyle string

const (
	// DefaultHighlightStyle specifies to use the default to highlight search result hits.
	DefaultHighlightStyle SearchHighlightStyle = ""

	// HTMLHighlightStyle specifies to use HTML tags to highlight search result hits.
	HTMLHighlightStyle SearchHighlightStyle = "html"

	// AnsiHightlightStyle specifies to use ANSI tags to highlight search result hits.
	// NOTE(review): the exported name carries an upstream typo ("Hightlight");
	// it cannot be renamed without breaking callers.
	AnsiHightlightStyle SearchHighlightStyle = "ansi"
)

// SearchScanConsistency indicates the level of data consistency desired for a search query.
type SearchScanConsistency uint

const (
	// searchScanConsistencyNotSet is the zero value, meaning the caller did
	// not choose a consistency level.
	searchScanConsistencyNotSet SearchScanConsistency = iota

	// SearchScanConsistencyNotBounded indicates no data consistency is required.
	SearchScanConsistencyNotBounded
)

// SearchHighlightOptions are the options available for search highlighting.
type SearchHighlightOptions struct {
	// Style selects the highlight markup style.
	Style SearchHighlightStyle
	// Fields lists the fields to highlight.
	Fields []string
}
// SearchOptions represents a pending search query.
type SearchOptions struct {
	// ScanConsistency selects the consistency level; mutually exclusive
	// with ConsistentWith.
	ScanConsistency SearchScanConsistency
	// Limit is the maximum number of hits to return ("size" in the request).
	Limit uint32
	// Skip is the number of hits to skip ("from" in the request).
	Skip uint32
	// Explain requests scoring explanations in the results.
	Explain bool
	// Highlight configures hit highlighting, if non-nil.
	Highlight *SearchHighlightOptions
	// Fields lists the stored fields to return with each hit.
	Fields []string
	// Sort lists the sort clauses applied to the results.
	Sort []cbsearch.Sort
	// Facets maps facet names to their definitions.
	Facets map[string]cbsearch.Facet
	// ConsistentWith requests consistency with the given mutation state;
	// mutually exclusive with ScanConsistency.
	ConsistentWith *MutationState

	// Raw provides a way to provide extra parameters in the request body for the query.
	Raw map[string]interface{}

	// Timeout bounds the query duration.
	Timeout time.Duration
	// RetryStrategy controls how the request is retried.
	RetryStrategy RetryStrategy

	// parentSpan is the tracing context this request is parented to.
	parentSpan requestSpanContext
}
// toMap converts the search options into the REST request body payload,
// validating that mutually exclusive consistency options are not combined.
// Returns the payload map, or an error for invalid option combinations.
func (opts *SearchOptions) toMap() (map[string]interface{}, error) {
	data := make(map[string]interface{})
	if opts.Limit > 0 {
		data["size"] = opts.Limit
	}
	if opts.Skip > 0 {
		data["from"] = opts.Skip
	}
	if opts.Explain {
		data["explain"] = opts.Explain
	}
	if len(opts.Fields) > 0 {
		data["fields"] = opts.Fields
	}
	if len(opts.Sort) > 0 {
		data["sort"] = opts.Sort
	}
	if opts.Highlight != nil {
		highlight := make(map[string]interface{})
		highlight["style"] = string(opts.Highlight.Style)
		highlight["fields"] = opts.Highlight.Fields
		data["highlight"] = highlight
	}
	if opts.Facets != nil {
		facets := make(map[string]interface{})
		for k, v := range opts.Facets {
			facets[k] = v
		}
		data["facets"] = facets
	}

	// ScanConsistency and ConsistentWith are mutually exclusive ways of
	// expressing consistency. (Was compared against the literal 0; use the
	// named sentinel for consistency with the check below — same value.)
	if opts.ScanConsistency != searchScanConsistencyNotSet && opts.ConsistentWith != nil {
		return nil, makeInvalidArgumentsError("ScanConsistency and ConsistentWith must be used exclusively")
	}

	var ctl map[string]interface{}
	if opts.ScanConsistency != searchScanConsistencyNotSet {
		consistency := make(map[string]interface{})
		if opts.ScanConsistency == SearchScanConsistencyNotBounded {
			consistency["level"] = "not_bounded"
		} else {
			return nil, makeInvalidArgumentsError("unexpected consistency option")
		}
		ctl = map[string]interface{}{"consistency": consistency}
	}
	if opts.ConsistentWith != nil {
		consistency := make(map[string]interface{})
		consistency["level"] = "at_plus"
		consistency["vectors"] = opts.ConsistentWith.toSearchMutationState()
		if ctl == nil {
			ctl = make(map[string]interface{})
		}
		ctl["consistency"] = consistency
	}
	if ctl != nil {
		data["ctl"] = ctl
	}

	// Raw entries are applied last, so callers can override generated fields.
	if opts.Raw != nil {
		for k, v := range opts.Raw {
			data[k] = v
		}
	}

	return data, nil
}

View File

@@ -1,327 +0,0 @@
package gocb
import "github.com/couchbase/gocbcore/v9/memd"
// LookupInSpec is the representation of an operation available when calling LookupIn
type LookupInSpec struct {
	// op is the sub-document opcode sent to the server.
	op memd.SubDocOpType
	// path is the sub-document path the operation targets.
	path string
	// isXattr marks the path as an extended attribute rather than body content.
	isXattr bool
}

// MutateInSpec is the representation of an operation available when calling MutateIn
type MutateInSpec struct {
	// op is the sub-document opcode sent to the server.
	op memd.SubDocOpType
	// createPath requests that intermediate path components be created.
	createPath bool
	// isXattr marks the path as an extended attribute rather than body content.
	isXattr bool
	// path is the sub-document path the operation targets.
	path string
	// value is the operand for mutating operations (nil for removals).
	value interface{}
	// multiValue indicates value is an array whose elements are applied
	// individually (see the HasMultiple options).
	multiValue bool
}
// GetSpecOptions are the options available to LookupIn subdoc Get operations.
type GetSpecOptions struct {
	IsXattr bool
}

// GetSpec indicates a path to be retrieved from the document. The value of
// the path can later be retrieved from the LookupResult. The path syntax
// follows query's path syntax (e.g. `foo.bar.baz`).
func GetSpec(path string, opts *GetSpecOptions) LookupInSpec {
	xattr := false
	if opts != nil {
		xattr = opts.IsXattr
	}
	return LookupInSpec{
		op:      memd.SubDocOpGet,
		path:    path,
		isXattr: xattr,
	}
}
// ExistsSpecOptions are the options available to LookupIn subdoc Exists operations.
type ExistsSpecOptions struct {
	IsXattr bool
}

// ExistsSpec is similar to Path(), but does not actually retrieve the value
// from the server. This may save bandwidth if you only need to check for the
// existence of a path (without caring for its content). You can check the
// status of this operation by using .ContentAt (and ignoring the value) or
// .Exists() on the LookupResult.
func ExistsSpec(path string, opts *ExistsSpecOptions) LookupInSpec {
	xattr := false
	if opts != nil {
		xattr = opts.IsXattr
	}
	return LookupInSpec{
		op:      memd.SubDocOpExists,
		path:    path,
		isXattr: xattr,
	}
}
// CountSpecOptions are the options available to LookupIn subdoc Count operations.
type CountSpecOptions struct {
	IsXattr bool
}

// CountSpec allows you to retrieve the number of items in an array or keys
// within a dictionary within an element of a document.
func CountSpec(path string, opts *CountSpecOptions) LookupInSpec {
	xattr := false
	if opts != nil {
		xattr = opts.IsXattr
	}
	return LookupInSpec{
		op:      memd.SubDocOpGetCount,
		path:    path,
		isXattr: xattr,
	}
}
// InsertSpecOptions are the options available to subdocument Insert operations.
type InsertSpecOptions struct {
	CreatePath bool
	IsXattr    bool
}

// InsertSpec inserts a value at the specified path within the document.
func InsertSpec(path string, val interface{}, opts *InsertSpecOptions) MutateInSpec {
	var createPath, xattr bool
	if opts != nil {
		createPath = opts.CreatePath
		xattr = opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpDictAdd,
		createPath: createPath,
		isXattr:    xattr,
		path:       path,
		value:      val,
		multiValue: false,
	}
}
// UpsertSpecOptions are the options available to subdocument Upsert operations.
type UpsertSpecOptions struct {
	CreatePath bool
	IsXattr    bool
}

// UpsertSpec creates a new value at the specified path within the document if
// it does not exist, if it does exist then it updates it.
func UpsertSpec(path string, val interface{}, opts *UpsertSpecOptions) MutateInSpec {
	var createPath, xattr bool
	if opts != nil {
		createPath = opts.CreatePath
		xattr = opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpDictSet,
		createPath: createPath,
		isXattr:    xattr,
		path:       path,
		value:      val,
		multiValue: false,
	}
}
// ReplaceSpecOptions are the options available to subdocument Replace operations.
type ReplaceSpecOptions struct {
	IsXattr bool
}

// ReplaceSpec replaces the value of the field at path.
func ReplaceSpec(path string, val interface{}, opts *ReplaceSpecOptions) MutateInSpec {
	xattr := false
	if opts != nil {
		xattr = opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpReplace,
		createPath: false,
		isXattr:    xattr,
		path:       path,
		value:      val,
		multiValue: false,
	}
}
// RemoveSpecOptions are the options available to subdocument Remove operations.
type RemoveSpecOptions struct {
	IsXattr bool
}

// RemoveSpec removes the field at path.
func RemoveSpec(path string, opts *RemoveSpecOptions) MutateInSpec {
	xattr := false
	if opts != nil {
		xattr = opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpDelete,
		createPath: false,
		isXattr:    xattr,
		path:       path,
		value:      nil,
		multiValue: false,
	}
}
// ArrayAppendSpecOptions are the options available to subdocument ArrayAppend operations.
type ArrayAppendSpecOptions struct {
	CreatePath bool
	IsXattr    bool
	// HasMultiple adds multiple values as elements to an array.
	// When used `value` in the spec must be an array type
	// ArrayAppend("path", []int{1,2,3,4}, ArrayAppendSpecOptions{HasMultiple:true}) =>
	//   "path" [..., 1,2,3,4]
	//
	// This is a more efficient version (at both the network and server levels)
	// of doing
	// spec.ArrayAppend("path", 1, nil)
	// spec.ArrayAppend("path", 2, nil)
	// spec.ArrayAppend("path", 3, nil)
	HasMultiple bool
}

// ArrayAppendSpec adds an element(s) to the end (i.e. right) of an array
func ArrayAppendSpec(path string, val interface{}, opts *ArrayAppendSpecOptions) MutateInSpec {
	var createPath, xattr, multi bool
	if opts != nil {
		createPath = opts.CreatePath
		xattr = opts.IsXattr
		multi = opts.HasMultiple
	}
	return MutateInSpec{
		op:         memd.SubDocOpArrayPushLast,
		createPath: createPath,
		isXattr:    xattr,
		path:       path,
		value:      val,
		multiValue: multi,
	}
}
// ArrayPrependSpecOptions are the options available to subdocument ArrayPrepend operations.
type ArrayPrependSpecOptions struct {
	CreatePath bool
	IsXattr    bool
	// HasMultiple adds multiple values as elements to an array.
	// When used `value` in the spec must be an array type
	// ArrayPrepend("path", []int{1,2,3,4}, ArrayPrependSpecOptions{HasMultiple:true}) =>
	//   "path" [1,2,3,4, ....]
	//
	// This is a more efficient version (at both the network and server levels)
	// of doing
	// spec.ArrayPrepend("path", 1, nil)
	// spec.ArrayPrepend("path", 2, nil)
	// spec.ArrayPrepend("path", 3, nil)
	HasMultiple bool
}

// ArrayPrependSpec adds an element to the beginning (i.e. left) of an array
func ArrayPrependSpec(path string, val interface{}, opts *ArrayPrependSpecOptions) MutateInSpec {
	var createPath, xattr, multi bool
	if opts != nil {
		createPath = opts.CreatePath
		xattr = opts.IsXattr
		multi = opts.HasMultiple
	}
	return MutateInSpec{
		op:         memd.SubDocOpArrayPushFirst,
		createPath: createPath,
		isXattr:    xattr,
		path:       path,
		value:      val,
		multiValue: multi,
	}
}
// ArrayInsertSpecOptions are the options available to subdocument ArrayInsert operations.
type ArrayInsertSpecOptions struct {
	CreatePath bool
	IsXattr    bool
	// HasMultiple adds multiple values as elements to an array.
	// When used `value` in the spec must be an array type
	// ArrayInsert("path[1]", []int{1,2,3,4}, ArrayInsertSpecOptions{HasMultiple:true}) =>
	//   "path" [..., 1,2,3,4]
	//
	// This is a more efficient version (at both the network and server levels)
	// of doing
	// spec.ArrayInsert("path[2]", 1, nil)
	// spec.ArrayInsert("path[3]", 2, nil)
	// spec.ArrayInsert("path[4]", 3, nil)
	HasMultiple bool
}

// ArrayInsertSpec inserts an element at a given position within an array. The
// position should be specified as part of the path, e.g. path.to.array[3]
func ArrayInsertSpec(path string, val interface{}, opts *ArrayInsertSpecOptions) MutateInSpec {
	var createPath, xattr, multi bool
	if opts != nil {
		createPath = opts.CreatePath
		xattr = opts.IsXattr
		multi = opts.HasMultiple
	}
	return MutateInSpec{
		op:         memd.SubDocOpArrayInsert,
		createPath: createPath,
		isXattr:    xattr,
		path:       path,
		value:      val,
		multiValue: multi,
	}
}
// ArrayAddUniqueSpecOptions are the options available to subdocument ArrayAddUnique operations.
type ArrayAddUniqueSpecOptions struct {
	CreatePath bool
	IsXattr    bool
}

// ArrayAddUniqueSpec adds an dictionary add unique operation to this mutation operation set.
func ArrayAddUniqueSpec(path string, val interface{}, opts *ArrayAddUniqueSpecOptions) MutateInSpec {
	var createPath, xattr bool
	if opts != nil {
		createPath = opts.CreatePath
		xattr = opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpArrayAddUnique,
		createPath: createPath,
		isXattr:    xattr,
		path:       path,
		value:      val,
		multiValue: false,
	}
}
// CounterSpecOptions are the options available to subdocument Increment and Decrement operations.
type CounterSpecOptions struct {
	CreatePath bool
	IsXattr    bool
}

// IncrementSpec adds an increment operation to this mutation operation set.
func IncrementSpec(path string, delta int64, opts *CounterSpecOptions) MutateInSpec {
	var createPath, xattr bool
	if opts != nil {
		createPath = opts.CreatePath
		xattr = opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpCounter,
		createPath: createPath,
		isXattr:    xattr,
		path:       path,
		value:      delta,
		multiValue: false,
	}
}

// DecrementSpec adds a decrement operation to this mutation operation set.
func DecrementSpec(path string, delta int64, opts *CounterSpecOptions) MutateInSpec {
	var createPath, xattr bool
	if opts != nil {
		createPath = opts.CreatePath
		xattr = opts.IsXattr
	}
	return MutateInSpec{
		op:         memd.SubDocOpCounter,
		createPath: createPath,
		isXattr:    xattr,
		path:       path,
		// The server receives the negated delta for decrements.
		value:      -delta,
		multiValue: false,
	}
}

View File

@@ -1,414 +0,0 @@
package gocb
import (
"encoding/json"
"sort"
"sync"
"sync/atomic"
"time"
)
// thresholdLogGroup holds the sample of slowest operations for one service.
// ops is kept sorted ascending by duration and capped at its capacity.
type thresholdLogGroup struct {
	// name is the service label emitted in the log output.
	name string
	// floor is the minimum duration for an op to be sampled at all.
	floor time.Duration
	// ops is the current sample, sorted ascending by duration.
	ops []*thresholdLogSpan
	// lock guards ops.
	lock sync.RWMutex
}

// init prepares the group with its service name, duration floor, and the
// maximum sample size (the capacity of ops).
func (g *thresholdLogGroup) init(name string, floor time.Duration, size uint32) {
	g.name = name
	g.floor = floor
	g.ops = make([]*thresholdLogSpan, 0, size)
}
// recordOp inserts span into the group's sample of slowest operations,
// keeping g.ops sorted ascending by duration and capped at its capacity.
func (g *thresholdLogGroup) recordOp(span *thresholdLogSpan) {
	// Ops faster than the configured floor are never sampled.
	if span.duration < g.floor {
		return
	}

	// Preemptively check that we actually need to be inserted using a read lock first
	// this is a performance improvement measure to avoid locking the mutex all the time.
	g.lock.RLock()
	if len(g.ops) == cap(g.ops) && span.duration < g.ops[0].duration {
		// we are at capacity and we are faster than the fastest slow op
		g.lock.RUnlock()
		return
	}
	g.lock.RUnlock()

	g.lock.Lock()
	// Re-check under the write lock: the sample may have changed between
	// releasing the read lock and acquiring the write lock.
	if len(g.ops) == cap(g.ops) && span.duration < g.ops[0].duration {
		// we are at capacity and we are faster than the fastest slow op
		g.lock.Unlock()
		return
	}

	l := len(g.ops)
	// Binary-search for the first element strictly slower than span.
	i := sort.Search(l, func(i int) bool { return span.duration < g.ops[i].duration })

	// i represents the slot where it should be inserted

	if len(g.ops) < cap(g.ops) {
		// Below capacity: grow by one and shift the tail right.
		if i == l {
			g.ops = append(g.ops, span)
		} else {
			g.ops = append(g.ops, nil)
			copy(g.ops[i+1:], g.ops[i:])
			g.ops[i] = span
		}
	} else {
		// At capacity: discard the fastest sampled op (index 0), shifting
		// the prefix left, then place span at i-1.
		if i == 0 {
			g.ops[i] = span
		} else {
			copy(g.ops[0:i-1], g.ops[1:i])
			g.ops[i-1] = span
		}
	}

	g.lock.Unlock()
}
// thresholdLogItem is the JSON shape of a single sampled operation.
// All *Us fields are microseconds (durations divided by time.Microsecond).
type thresholdLogItem struct {
	OperationName          string `json:"operation_name,omitempty"`
	TotalTimeUs            uint64 `json:"total_us,omitempty"`
	EncodeDurationUs       uint64 `json:"encode_us,omitempty"`
	DispatchDurationUs     uint64 `json:"dispatch_us,omitempty"`
	ServerDurationUs       uint64 `json:"server_us,omitempty"`
	LastRemoteAddress      string `json:"last_remote_address,omitempty"`
	LastLocalAddress       string `json:"last_local_address,omitempty"`
	LastDispatchDurationUs uint64 `json:"last_dispatch_us,omitempty"`
	LastOperationID        string `json:"last_operation_id,omitempty"`
	LastLocalID            string `json:"last_local_id,omitempty"`
	DocumentKey            string `json:"document_key,omitempty"`
}

// thresholdLogService is the JSON shape of one service's flush: the service
// name, the number of sampled ops, and the ops themselves (slowest first).
type thresholdLogService struct {
	Service string             `json:"service"`
	Count   uint64             `json:"count"`
	Top     []thresholdLogItem `json:"top"`
}
// logRecordedRecords drains the group's current sample and logs it as JSON,
// slowest operation first. sampleSize bounds the preallocated copy buffer.
func (g *thresholdLogGroup) logRecordedRecords(sampleSize uint32) {
	// Preallocate space to copy the ops into...
	oldOps := make([]*thresholdLogSpan, sampleSize)

	g.lock.Lock()
	// Escape early if we have no ops to log...
	if len(g.ops) == 0 {
		g.lock.Unlock()
		return
	}

	// Copy out our ops so we can cheaply print them out without blocking
	// our ops from actually being recorded in other goroutines (which would
	// effectively slow down the op pipeline for logging).
	oldOps = oldOps[0:len(g.ops)]
	copy(oldOps, g.ops)
	g.ops = g.ops[:0]
	g.lock.Unlock()

	jsonData := thresholdLogService{
		Service: g.name,
	}

	// ops are sorted ascending by duration; iterate backwards so the
	// emitted list is slowest-first.
	for i := len(oldOps) - 1; i >= 0; i-- {
		op := oldOps[i]
		jsonData.Top = append(jsonData.Top, thresholdLogItem{
			OperationName:          op.opName,
			TotalTimeUs:            uint64(op.duration / time.Microsecond),
			DispatchDurationUs:     uint64(op.totalDispatchDuration / time.Microsecond),
			ServerDurationUs:       uint64(op.totalServerDuration / time.Microsecond),
			EncodeDurationUs:       uint64(op.totalEncodeDuration / time.Microsecond),
			LastRemoteAddress:      op.lastDispatchPeer,
			LastDispatchDurationUs: uint64(op.lastDispatchDuration / time.Microsecond),
			LastOperationID:        op.lastOperationID,
			LastLocalID:            op.lastLocalID,
			DocumentKey:            op.documentKey,
		})
	}
	jsonData.Count = uint64(len(jsonData.Top))

	jsonBytes, err := json.Marshal(jsonData)
	if err != nil {
		logDebugf("Failed to generate threshold logging service JSON: %s", err)
		// Previously fell through and logged an empty payload on marshal
		// failure; there is nothing useful to emit, so stop here.
		return
	}

	logInfof("Threshold Log: %s", jsonBytes)
}
// ThresholdLoggingOptions is the set of options available for configuring threshold logging.
// Zero values are replaced by defaults in newThresholdLoggingTracer
// (Interval 10s, SampleSize 10, KVThreshold 500ms, all others 1s).
type ThresholdLoggingOptions struct {
	ServerDurationDisabled bool
	// Interval is how often sampled operations are flushed to the log.
	Interval time.Duration
	// SampleSize is the maximum number of slow ops kept per service.
	SampleSize uint32
	// Per-service minimum durations for an operation to be sampled.
	KVThreshold         time.Duration
	ViewsThreshold      time.Duration
	QueryThreshold      time.Duration
	SearchThreshold     time.Duration
	AnalyticsThreshold  time.Duration
	ManagementThreshold time.Duration
}
// thresholdLoggingTracer is a specialized Tracer implementation which will automatically
// log operations which fall outside of a set of thresholds. Note that this tracer is
// only safe for use within the Couchbase SDK, uses by external event sources are
// likely to fail.
type thresholdLoggingTracer struct {
	// Interval is how often the sampled ops are flushed to the log.
	Interval time.Duration
	// SampleSize caps how many slow ops are retained per service group.
	SampleSize uint32
	// Per-service sampling floors (see ThresholdLoggingOptions).
	KVThreshold         time.Duration
	ViewsThreshold      time.Duration
	QueryThreshold      time.Duration
	SearchThreshold     time.Duration
	AnalyticsThreshold  time.Duration
	ManagementThreshold time.Duration

	// killCh stops the background logger goroutine (see DecRef).
	killCh chan struct{}
	// refCount tracks how many Cluster instances reference this tracer.
	refCount int32
	// nextTick is the next scheduled flush time.
	nextTick time.Time
	// One sample group per traced service.
	kvGroup         thresholdLogGroup
	viewsGroup      thresholdLogGroup
	queryGroup      thresholdLogGroup
	searchGroup     thresholdLogGroup
	analyticsGroup  thresholdLogGroup
	managementGroup thresholdLogGroup
}
// newThresholdLoggingTracer creates a tracer from opts, applying defaults
// for any zero-valued option (Interval 10s, SampleSize 10, KVThreshold
// 500ms, all other service thresholds 1s). opts may be nil.
func newThresholdLoggingTracer(opts *ThresholdLoggingOptions) *thresholdLoggingTracer {
	if opts == nil {
		opts = &ThresholdLoggingOptions{}
	}
	if opts.Interval == 0 {
		opts.Interval = 10 * time.Second
	}
	if opts.SampleSize == 0 {
		opts.SampleSize = 10
	}
	if opts.KVThreshold == 0 {
		opts.KVThreshold = 500 * time.Millisecond
	}
	if opts.ViewsThreshold == 0 {
		opts.ViewsThreshold = 1 * time.Second
	}
	if opts.QueryThreshold == 0 {
		opts.QueryThreshold = 1 * time.Second
	}
	if opts.SearchThreshold == 0 {
		opts.SearchThreshold = 1 * time.Second
	}
	if opts.AnalyticsThreshold == 0 {
		opts.AnalyticsThreshold = 1 * time.Second
	}
	if opts.ManagementThreshold == 0 {
		opts.ManagementThreshold = 1 * time.Second
	}

	t := &thresholdLoggingTracer{
		Interval:            opts.Interval,
		SampleSize:          opts.SampleSize,
		KVThreshold:         opts.KVThreshold,
		ViewsThreshold:      opts.ViewsThreshold,
		QueryThreshold:      opts.QueryThreshold,
		SearchThreshold:     opts.SearchThreshold,
		AnalyticsThreshold:  opts.AnalyticsThreshold,
		ManagementThreshold: opts.ManagementThreshold,
	}
	t.kvGroup.init("kv", t.KVThreshold, t.SampleSize)
	t.viewsGroup.init("views", t.ViewsThreshold, t.SampleSize)
	t.queryGroup.init("query", t.QueryThreshold, t.SampleSize)
	t.searchGroup.init("search", t.SearchThreshold, t.SampleSize)
	t.analyticsGroup.init("analytics", t.AnalyticsThreshold, t.SampleSize)
	t.managementGroup.init("management", t.ManagementThreshold, t.SampleSize)

	// t was constructed just above, so killCh is always nil and nextTick is
	// always zero at this point; the previous `if t.killCh == nil` and
	// `if t.nextTick.IsZero()` guards were dead code.
	t.killCh = make(chan struct{})
	t.nextTick = time.Now().Add(t.Interval)

	return t
}
// AddRef is used internally to keep track of the number of Cluster instances referring to it.
// This is used to correctly shut down the aggregation routines once there are no longer any
// instances tracing to it.
func (t *thresholdLoggingTracer) AddRef() int32 {
	newRefCount := atomic.AddInt32(&t.refCount, 1)
	// First reference starts the background flush goroutine.
	if newRefCount == 1 {
		t.startLoggerRoutine()
	}
	return newRefCount
}

// DecRef is the counterpart to AddRef (see AddRef for more information).
func (t *thresholdLoggingTracer) DecRef() int32 {
	newRefCount := atomic.AddInt32(&t.refCount, -1)
	// Last reference gone: signal the logger goroutine to flush and exit.
	if newRefCount == 0 {
		t.killCh <- struct{}{}
	}
	return newRefCount
}
// logRecordedRecords flushes the sampled operations of every service group.
func (t *thresholdLoggingTracer) logRecordedRecords() {
	groups := []*thresholdLogGroup{
		&t.kvGroup,
		&t.viewsGroup,
		&t.queryGroup,
		&t.searchGroup,
		&t.analyticsGroup,
		&t.managementGroup,
	}
	for _, group := range groups {
		group.logRecordedRecords(t.SampleSize)
	}
}
// startLoggerRoutine launches the background goroutine that periodically
// flushes the recorded samples.
func (t *thresholdLoggingTracer) startLoggerRoutine() {
	go t.loggerRoutine()
}

// loggerRoutine flushes the samples every Interval until killCh fires,
// performing one final flush on shutdown.
func (t *thresholdLoggingTracer) loggerRoutine() {
	for {
		select {
		case <-time.After(time.Until(t.nextTick)):
			t.nextTick = t.nextTick.Add(t.Interval)
			t.logRecordedRecords()
		case <-t.killCh:
			// Flush whatever is still pending before exiting.
			t.logRecordedRecords()
			return
		}
	}
}
// recordOp routes a finished span to the sample group for its service.
// Spans with an unrecognized service name are dropped.
func (t *thresholdLoggingTracer) recordOp(span *thresholdLogSpan) {
	var group *thresholdLogGroup
	switch span.serviceName {
	case "mgmt":
		group = &t.managementGroup
	case "kv":
		group = &t.kvGroup
	case "views":
		group = &t.viewsGroup
	case "query":
		group = &t.queryGroup
	case "search":
		group = &t.searchGroup
	case "analytics":
		group = &t.analyticsGroup
	default:
		return
	}
	group.recordOp(span)
}
// StartSpan belongs to the Tracer interface.
func (t *thresholdLoggingTracer) StartSpan(operationName string, parentContext requestSpanContext) requestSpan {
	newSpan := &thresholdLogSpan{
		tracer:    t,
		opName:    operationName,
		startTime: time.Now(),
	}
	// Only spans parented to another threshold span carry a parent link.
	if parent, ok := parentContext.(*thresholdLogSpanContext); ok {
		newSpan.parent = parent.span
	}
	return newSpan
}
// thresholdLogSpan is the requestSpan implementation recorded by the
// threshold logging tracer. Totals are aggregated into the parent span
// (under lock) when the span finishes.
type thresholdLogSpan struct {
	// tracer is the owner that receives this span on Finish.
	tracer *thresholdLoggingTracer
	// parent, if non-nil, receives this span's totals on Finish.
	parent *thresholdLogSpan
	// opName is the operation name; "dispatch" and "encode" spans are
	// treated specially in Finish.
	opName string
	// startTime is when the span was started; duration is measured from it.
	startTime time.Time
	// serviceName is set via the "couchbase.service" tag; non-empty values
	// make the span a threshold-logging candidate.
	serviceName string
	// peerAddress is set via the "peer.address" tag.
	peerAddress string
	// serverDuration is set via the "server_duration" tag.
	serverDuration time.Duration
	// duration is the measured wall time, set in Finish.
	duration time.Duration
	// Aggregated totals (own values plus finished children's).
	totalServerDuration   time.Duration
	totalDispatchDuration time.Duration
	totalEncodeDuration   time.Duration
	// Details of the most recent dispatch within this span tree.
	lastDispatchPeer     string
	lastDispatchDuration time.Duration
	lastOperationID      string
	lastLocalID          string
	documentKey          string
	// lock guards aggregation into this span from finishing children.
	lock sync.Mutex
}
// Context returns a span context that parents new spans to this span.
func (n *thresholdLogSpan) Context() requestSpanContext {
	return &thresholdLogSpanContext{n}
}

// SetTag records a recognized tag on the span. Unknown keys are ignored;
// values of the wrong type are dropped with a debug log.
func (n *thresholdLogSpan) SetTag(key string, value interface{}) requestSpan {
	var ok bool

	switch key {
	case "server_duration":
		if n.serverDuration, ok = value.(time.Duration); !ok {
			logDebugf("Failed to cast span server_duration tag")
		}
	case "couchbase.service":
		if n.serviceName, ok = value.(string); !ok {
			logDebugf("Failed to cast span couchbase.service tag")
		}
	case "peer.address":
		if n.peerAddress, ok = value.(string); !ok {
			logDebugf("Failed to cast span peer.address tag")
		}
	case "couchbase.operation_id":
		if n.lastOperationID, ok = value.(string); !ok {
			logDebugf("Failed to cast span couchbase.operation_id tag")
		}
	case "couchbase.document_key":
		if n.documentKey, ok = value.(string); !ok {
			logDebugf("Failed to cast span couchbase.document_key tag")
		}
	case "couchbase.local_id":
		if n.lastLocalID, ok = value.(string); !ok {
			logDebugf("Failed to cast span couchbase.local_id tag")
		}
	}
	return n
}
// Finish stamps the span's duration, rolls its totals up into the parent
// span (if any), and hands the span to the tracer for threshold sampling
// when it carries a service name.
func (n *thresholdLogSpan) Finish() {
	n.duration = time.Since(n.startTime)

	n.totalServerDuration += n.serverDuration
	// "dispatch" spans contribute to dispatch totals and record the most
	// recent dispatch peer/duration.
	if n.opName == "dispatch" {
		n.totalDispatchDuration += n.duration
		n.lastDispatchPeer = n.peerAddress
		n.lastDispatchDuration = n.duration
	}
	// "encode" spans contribute to encode totals.
	if n.opName == "encode" {
		n.totalEncodeDuration += n.duration
	}

	if n.parent != nil {
		// Aggregate into the parent under the parent's lock.
		n.parent.lock.Lock()
		n.parent.totalServerDuration += n.totalServerDuration
		n.parent.totalDispatchDuration += n.totalDispatchDuration
		n.parent.totalEncodeDuration += n.totalEncodeDuration
		// "Last" fields only propagate when this span actually set them.
		if n.lastDispatchPeer != "" || n.lastDispatchDuration > 0 {
			n.parent.lastDispatchPeer = n.lastDispatchPeer
			n.parent.lastDispatchDuration = n.lastDispatchDuration
		}
		if n.lastOperationID != "" {
			n.parent.lastOperationID = n.lastOperationID
		}
		if n.lastLocalID != "" {
			n.parent.lastLocalID = n.lastLocalID
		}
		if n.documentKey != "" {
			n.parent.documentKey = n.documentKey
		}
		n.parent.lock.Unlock()
	}

	// Only spans tagged with a service are candidates for threshold logging.
	if n.serviceName != "" {
		n.tracer.recordOp(n)
	}
}
// thresholdLogSpanContext wraps a thresholdLogSpan so it can be passed
// around as a requestSpanContext parent reference.
type thresholdLogSpanContext struct {
	span *thresholdLogSpan
}

View File

@@ -1,183 +0,0 @@
package gocb
import (
"encoding/json"
"fmt"
"strconv"
gocbcore "github.com/couchbase/gocbcore/v9"
)
// MutationToken holds the mutation state information from an operation.
type MutationToken struct {
	// token is the underlying core token (VbID/VbUUID/SeqNo).
	token gocbcore.MutationToken
	// bucketName is the bucket the mutation occurred in.
	bucketName string
}

// bucketToken is the wire form of one vbucket's state; its custom JSON
// codec below encodes it as a two-element array [seqno, "vbuuid"].
type bucketToken struct {
	SeqNo uint64 `json:"seqno"`
	// VbUUID is carried as a decimal string on the wire.
	VbUUID string `json:"vbuuid"`
}
// BucketName returns the name of the bucket that this token belongs to.
func (mt MutationToken) BucketName() string {
	return mt.bucketName
}

// PartitionUUID returns the UUID of the vbucket that this token belongs to.
func (mt MutationToken) PartitionUUID() uint64 {
	return uint64(mt.token.VbUUID)
}

// PartitionID returns the ID of the vbucket that this token belongs to.
func (mt MutationToken) PartitionID() uint64 {
	return uint64(mt.token.VbID)
}

// SequenceNumber returns the sequence number of the vbucket that this token belongs to.
func (mt MutationToken) SequenceNumber() uint64 {
	return uint64(mt.token.SeqNo)
}
// MarshalJSON encodes the token as the two-element array [seqno, vbuuid].
func (mt bucketToken) MarshalJSON() ([]byte, error) {
	return json.Marshal([]interface{}{mt.SeqNo, mt.VbUUID})
}

// UnmarshalJSON decodes the two-element array form back into the struct.
func (mt *bucketToken) UnmarshalJSON(data []byte) error {
	fields := []interface{}{&mt.SeqNo, &mt.VbUUID}
	return json.Unmarshal(data, &fields)
}
// bucketTokens maps a vbucket ID (as a decimal string) to its token state.
type bucketTokens map[string]*bucketToken

// mutationStateData maps a bucket name to its vbucket token states.
type mutationStateData map[string]*bucketTokens

// searchMutationState is the consistency-vector format used by the search
// service: bucket -> "vbID/vbUUID" -> seqno.
type searchMutationState map[string]map[string]uint64

// MutationState holds and aggregates MutationToken's across multiple operations.
type MutationState struct {
	// tokens holds the accumulated tokens; tokens without a bucket name
	// are never added (see Add).
	tokens []MutationToken
}
// NewMutationState creates a new MutationState for tracking mutation state.
func NewMutationState(tokens ...MutationToken) *MutationState {
	state := &MutationState{}
	state.Add(tokens...)
	return state
}

// Add includes an operation's mutation information in this mutation state.
// Tokens without a bucket name are ignored.
func (mt *MutationState) Add(tokens ...MutationToken) {
	for _, tok := range tokens {
		if tok.bucketName == "" {
			continue
		}
		mt.tokens = append(mt.tokens, tok)
	}
}
// MutationStateInternal specifies internal operations.
// Internal: This should never be used and is not supported.
type MutationStateInternal struct {
	mt *MutationState
}

// Internal return a new MutationStateInternal.
// Internal: This should never be used and is not supported.
func (mt *MutationState) Internal() *MutationStateInternal {
	return &MutationStateInternal{mt: mt}
}

// Add includes an operation's mutation information in this mutation state.
func (mti *MutationStateInternal) Add(bucket string, tokens ...gocbcore.MutationToken) {
	for _, tok := range tokens {
		mti.mt.Add(MutationToken{
			bucketName: bucket,
			token:      tok,
		})
	}
}

// Tokens returns the tokens belonging to the mutation state.
func (mti *MutationStateInternal) Tokens() []MutationToken {
	return mti.mt.tokens
}
// MarshalJSON marshals this mutation state to JSON, grouping tokens by
// bucket and then by vbucket ID. An empty state marshals as null.
func (mt *MutationState) MarshalJSON() ([]byte, error) {
	var data mutationStateData
	for _, token := range mt.tokens {
		// Lazily allocate so an empty state stays nil (-> JSON null).
		if data == nil {
			data = make(mutationStateData)
		}

		bucket := token.bucketName
		tokens := data[bucket]
		if tokens == nil {
			fresh := make(bucketTokens)
			data[bucket] = &fresh
			tokens = &fresh
		}

		vbKey := fmt.Sprintf("%d", token.token.VbID)
		entry := (*tokens)[vbKey]
		if entry == nil {
			entry = &bucketToken{}
			(*tokens)[vbKey] = entry
		}
		// Later tokens for the same vbucket overwrite earlier ones.
		entry.SeqNo = uint64(token.token.SeqNo)
		entry.VbUUID = fmt.Sprintf("%d", token.token.VbUUID)
	}

	return json.Marshal(data)
}
// UnmarshalJSON unmarshal's a mutation state from JSON, reversing the
// bucket -> vbucket grouping produced by MarshalJSON.
func (mt *MutationState) UnmarshalJSON(data []byte) error {
	var stateData mutationStateData
	err := json.Unmarshal(data, &stateData)
	if err != nil {
		return err
	}

	for bucketName, bTokens := range stateData {
		for vbIDStr, stateToken := range *bTokens {
			// vbucket IDs are 16-bit; reject out-of-range values instead
			// of silently truncating.
			vbID, err := strconv.ParseUint(vbIDStr, 10, 16)
			if err != nil {
				return err
			}

			// VbUUIDs are full 64-bit values. strconv.Atoi fails for UUIDs
			// above MaxInt, which broke round-tripping states produced by
			// MarshalJSON; ParseUint accepts the whole uint64 range.
			vbUUID, err := strconv.ParseUint(stateToken.VbUUID, 10, 64)
			if err != nil {
				return err
			}

			token := MutationToken{
				bucketName: bucketName,
				token: gocbcore.MutationToken{
					VbID:   uint16(vbID),
					VbUUID: gocbcore.VbUUID(vbUUID),
					SeqNo:  gocbcore.SeqNo(stateToken.SeqNo),
				},
			}

			mt.tokens = append(mt.tokens, token)
		}
	}

	return nil
}
// toSearchMutationState is specific to search; search doesn't accept tokens in
// the same format as other services. Keys are "vbID/vbUUID" mapped to seqno.
func (mt *MutationState) toSearchMutationState() searchMutationState {
	result := make(searchMutationState)
	for _, tok := range mt.tokens {
		bucketState, ok := result[tok.bucketName]
		if !ok {
			bucketState = make(map[string]uint64)
			result[tok.bucketName] = bucketState
		}
		key := fmt.Sprintf("%d/%d", tok.token.VbID, tok.token.VbUUID)
		bucketState[key] = uint64(tok.token.SeqNo)
	}
	return result
}

View File

@@ -1,97 +0,0 @@
package gocb
import (
"github.com/couchbase/gocbcore/v9"
)
// tracerAddRef increments the tracer's reference count when the tracer
// optionally implements an AddRef method; nil tracers are ignored.
func tracerAddRef(tracer requestTracer) {
	if tracer == nil {
		return
	}

	type refCounted interface {
		AddRef() int32
	}
	if rc, ok := tracer.(refCounted); ok {
		rc.AddRef()
	}
}
// tracerDecRef decrements the tracer's reference count when the tracer
// optionally implements a DecRef method; nil tracers are ignored.
func tracerDecRef(tracer requestTracer) {
	if tracer == nil {
		return
	}

	type refCounted interface {
		DecRef() int32
	}
	if rc, ok := tracer.(refCounted); ok {
		rc.DecRef()
	}
}
// requestTracer describes the tracing abstraction in the SDK.
// Implementations produce a span per operation, parented on the
// supplied context.
type requestTracer interface {
	StartSpan(operationName string, parentContext requestSpanContext) requestSpan
}
// requestSpan is the interface for spans that are created by a requestTracer.
type requestSpan interface {
	// Finish completes the span.
	Finish()

	// Context returns the span's context, used for parenting further spans.
	Context() requestSpanContext

	// SetTag attaches a key/value annotation and returns the span to use
	// from then on (implementations may return a replacement span).
	SetTag(key string, value interface{}) requestSpan
}
// requestSpanContext is the interface for external span contexts that can be
// passed into the SDK option blocks.
type requestSpanContext interface {
}
// requestTracerWrapper adapts an SDK-level requestTracer so it can be handed
// to gocbcore, which expects its own tracer interface.
type requestTracerWrapper struct {
	tracer requestTracer
}
// StartSpan starts a span on the wrapped SDK tracer and adapts the result to
// gocbcore's span interface.
func (tracer *requestTracerWrapper) StartSpan(operationName string, parentContext gocbcore.RequestSpanContext) gocbcore.RequestSpan {
	inner := tracer.tracer.StartSpan(operationName, parentContext)
	return requestSpanWrapper{span: inner}
}
// requestSpanWrapper adapts an SDK-level requestSpan to gocbcore's span
// interface.
type requestSpanWrapper struct {
	span requestSpan
}
// Finish completes the underlying SDK span.
func (span requestSpanWrapper) Finish() {
	span.span.Finish()
}

// Context returns the wrapped span's context for use by gocbcore.
func (span requestSpanWrapper) Context() gocbcore.RequestSpanContext {
	return span.span.Context()
}

// SetTag forwards the tag to the wrapped span, keeping whatever span the
// wrapped implementation returns (it may be a replacement).
func (span requestSpanWrapper) SetTag(key string, value interface{}) gocbcore.RequestSpan {
	span.span = span.span.SetTag(key, value)
	return span
}
// noopSpan and noopSpanContext are inert implementations used when no real
// tracer is configured.
type noopSpan struct{}
type noopSpanContext struct{}

var (
	// Shared instances; the types are stateless so one of each suffices.
	defaultNoopSpanContext = noopSpanContext{}
	defaultNoopSpan        = noopSpan{}
)
// noopTracer will have a future use so we tell the linter not to flag it.
type noopTracer struct { // nolint: unused
}

// StartSpan returns the shared no-op span; nothing is recorded.
func (tracer *noopTracer) StartSpan(operationName string, parentContext requestSpanContext) requestSpan {
	return defaultNoopSpan
}
// Finish is a no-op.
func (span noopSpan) Finish() {
}

// Context returns the shared no-op span context.
func (span noopSpan) Context() requestSpanContext {
	return defaultNoopSpanContext
}

// SetTag discards the tag and returns the shared no-op span.
func (span noopSpan) SetTag(key string, value interface{}) requestSpan {
	return defaultNoopSpan
}

View File

@@ -1,398 +0,0 @@
package gocb
import (
"encoding/json"
gocbcore "github.com/couchbase/gocbcore/v9"
"github.com/pkg/errors"
)
// Transcoder provides an interface for transforming Go values to and
// from raw bytes for storage and retrieval from Couchbase data storage.
type Transcoder interface {
	// Decode decodes retrieved bytes (plus their common flags) into a Go type.
	Decode([]byte, uint32, interface{}) error

	// Encode encodes a Go type into bytes plus common flags for storage.
	Encode(interface{}) ([]byte, uint32, error)
}
// JSONTranscoder implements the default transcoding behavior and applies JSON transcoding to all values.
//
// This will apply the following behavior to the value:
// binary ([]byte) -> error.
// default -> JSON value, JSON Flags.
type JSONTranscoder struct {
}

// NewJSONTranscoder returns a new JSONTranscoder.
func NewJSONTranscoder() *JSONTranscoder {
	return new(JSONTranscoder)
}
// Decode applies JSON transcoding behaviour to decode into a Go type.
// Only uncompressed JSON payloads are accepted.
func (t *JSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)

	// Compressed payloads are not supported by this transcoder.
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}

	switch valueType {
	case gocbcore.BinaryType:
		return errors.New("binary datatype is not supported by JSONTranscoder")
	case gocbcore.StringType:
		return errors.New("string datatype is not supported by JSONTranscoder")
	case gocbcore.JSONType:
		return json.Unmarshal(bytes, &out)
	default:
		return errors.New("unexpected expectedFlags value")
	}
}
// Encode applies JSON transcoding behaviour to encode a Go type.
// Binary input is rejected; raw JSON passes through; everything else is
// marshaled. No compression is applied.
func (t *JSONTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	jsonFlags := gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)

	switch typed := value.(type) {
	case []byte, *[]byte:
		return nil, 0, errors.New("binary data is not supported by JSONTranscoder")
	case json.RawMessage:
		return typed, jsonFlags, nil
	case *json.RawMessage:
		return *typed, jsonFlags, nil
	case *interface{}:
		// Unwrap one level of interface indirection and retry.
		return t.Encode(*typed)
	default:
		encoded, err := json.Marshal(value)
		if err != nil {
			return nil, 0, err
		}
		return encoded, jsonFlags, nil
	}
}
// RawJSONTranscoder implements passthrough behavior of JSON data. This transcoder does not apply any serialization.
// It will forward data across the network without incurring unnecessary parsing costs.
//
// This will apply the following behavior to the value:
// binary ([]byte) -> JSON bytes, JSON expectedFlags.
// string -> JSON bytes, JSON expectedFlags.
// default -> error.
type RawJSONTranscoder struct {
}

// NewRawJSONTranscoder returns a new RawJSONTranscoder.
func NewRawJSONTranscoder() *RawJSONTranscoder {
	return new(RawJSONTranscoder)
}
// Decode applies raw JSON transcoding behaviour to decode into a Go type.
// The payload is handed over verbatim; out must be *[]byte or *string.
func (t *RawJSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)

	// Compressed payloads are not supported by this transcoder.
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}

	switch valueType {
	case gocbcore.BinaryType:
		return errors.New("binary datatype is not supported by RawJSONTranscoder")
	case gocbcore.StringType:
		return errors.New("string datatype is not supported by RawJSONTranscoder")
	case gocbcore.JSONType:
		switch target := out.(type) {
		case *[]byte:
			*target = bytes
			return nil
		case *string:
			*target = string(bytes)
			return nil
		default:
			return errors.New("you must encode raw JSON data in a byte array or string")
		}
	default:
		return errors.New("unexpected expectedFlags value")
	}
}
// Encode applies raw JSON transcoding behaviour to encode a Go type.
// Byte and string inputs pass through untouched with JSON flags; any other
// type is rejected. No compression is applied.
func (t *RawJSONTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	jsonFlags := gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)

	switch typed := value.(type) {
	case []byte:
		return typed, jsonFlags, nil
	case *[]byte:
		return *typed, jsonFlags, nil
	case string:
		return []byte(typed), jsonFlags, nil
	case *string:
		return []byte(*typed), jsonFlags, nil
	case json.RawMessage:
		return typed, jsonFlags, nil
	case *json.RawMessage:
		return *typed, jsonFlags, nil
	case *interface{}:
		// Unwrap one level of interface indirection and retry.
		return t.Encode(*typed)
	default:
		return nil, 0, makeInvalidArgumentsError("only binary and string data is supported by RawJSONTranscoder")
	}
}
// RawStringTranscoder implements passthrough behavior of raw string data. This transcoder does not apply any serialization.
//
// This will apply the following behavior to the value:
// string -> string bytes, string expectedFlags.
// default -> error.
type RawStringTranscoder struct {
}

// NewRawStringTranscoder returns a new RawStringTranscoder.
func NewRawStringTranscoder() *RawStringTranscoder {
	return new(RawStringTranscoder)
}
// Decode applies raw string transcoding behaviour to decode into a Go type.
// Only uncompressed string payloads are accepted; out must be *string or
// *interface{}.
func (t *RawStringTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)

	// Compressed payloads are not supported by this transcoder.
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}

	switch valueType {
	case gocbcore.BinaryType, gocbcore.JSONType:
		return errors.New("only string datatype is supported by RawStringTranscoder")
	case gocbcore.StringType:
		switch target := out.(type) {
		case *string:
			*target = string(bytes)
			return nil
		case *interface{}:
			*target = string(bytes)
			return nil
		default:
			return errors.New("you must encode a string in a string or interface")
		}
	default:
		return errors.New("unexpected expectedFlags value")
	}
}
// Encode applies raw string transcoding behaviour to encode a Go type.
// Only string inputs are accepted; no compression is applied.
func (t *RawStringTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	stringFlags := gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)

	switch typed := value.(type) {
	case string:
		return []byte(typed), stringFlags, nil
	case *string:
		return []byte(*typed), stringFlags, nil
	case *interface{}:
		// Unwrap one level of interface indirection and retry.
		return t.Encode(*typed)
	default:
		return nil, 0, makeInvalidArgumentsError("only raw string data is supported by RawStringTranscoder")
	}
}
// RawBinaryTranscoder implements passthrough behavior of raw binary data. This transcoder does not apply any serialization.
//
// This will apply the following behavior to the value:
// binary ([]byte) -> binary bytes, binary expectedFlags.
// default -> error.
type RawBinaryTranscoder struct {
}

// NewRawBinaryTranscoder returns a new RawBinaryTranscoder.
func NewRawBinaryTranscoder() *RawBinaryTranscoder {
	return new(RawBinaryTranscoder)
}
// Decode applies raw binary transcoding behaviour to decode into a Go type.
// Only uncompressed binary payloads are accepted; out must be *[]byte or
// *interface{}.
func (t *RawBinaryTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)

	// Compressed payloads are not supported by this transcoder.
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}

	switch valueType {
	case gocbcore.StringType, gocbcore.JSONType:
		return errors.New("only binary datatype is supported by RawBinaryTranscoder")
	case gocbcore.BinaryType:
		switch target := out.(type) {
		case *[]byte:
			*target = bytes
			return nil
		case *interface{}:
			*target = bytes
			return nil
		default:
			return errors.New("you must encode binary in a byte array or interface")
		}
	default:
		return errors.New("unexpected expectedFlags value")
	}
}
// Encode applies raw binary transcoding behaviour to encode a Go type.
// Only byte-slice inputs are accepted; no compression is applied.
func (t *RawBinaryTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	binaryFlags := gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)

	switch typed := value.(type) {
	case []byte:
		return typed, binaryFlags, nil
	case *[]byte:
		return *typed, binaryFlags, nil
	case *interface{}:
		// Unwrap one level of interface indirection and retry.
		return t.Encode(*typed)
	default:
		return nil, 0, makeInvalidArgumentsError("only raw binary data is supported by RawBinaryTranscoder")
	}
}
// LegacyTranscoder implements the behaviour for a backward-compatible transcoder. This transcoder implements
// behaviour matching that of gocb v1.
//
// This will apply the following behavior to the value:
// binary ([]byte) -> binary bytes, Binary expectedFlags.
// string -> string bytes, String expectedFlags.
// default -> JSON value, JSON expectedFlags.
type LegacyTranscoder struct {
}

// NewLegacyTranscoder returns a new LegacyTranscoder.
func NewLegacyTranscoder() *LegacyTranscoder {
	return new(LegacyTranscoder)
}
// Decode applies legacy transcoding behaviour to decode into a Go type:
// binary payloads decode into byte slices, string payloads into strings,
// and JSON payloads are unmarshaled. Compression is not supported.
func (t *LegacyTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error {
	valueType, compression := gocbcore.DecodeCommonFlags(flags)

	// Compressed payloads are not supported by this transcoder.
	if compression != gocbcore.NoCompression {
		return errors.New("unexpected value compression")
	}

	switch valueType {
	case gocbcore.BinaryType:
		switch target := out.(type) {
		case *[]byte:
			*target = bytes
			return nil
		case *interface{}:
			*target = bytes
			return nil
		default:
			return errors.New("you must encode binary in a byte array or interface")
		}
	case gocbcore.StringType:
		switch target := out.(type) {
		case *string:
			*target = string(bytes)
			return nil
		case *interface{}:
			*target = string(bytes)
			return nil
		default:
			return errors.New("you must encode a string in a string or interface")
		}
	case gocbcore.JSONType:
		return json.Unmarshal(bytes, &out)
	default:
		return errors.New("unexpected expectedFlags value")
	}
}
// Encode applies legacy transcoding behavior to encode a Go type:
// byte slices get binary flags, strings get string flags, raw JSON passes
// through, and anything else is JSON-marshaled. No compression is applied.
func (t *LegacyTranscoder) Encode(value interface{}) ([]byte, uint32, error) {
	binaryFlags := gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression)
	stringFlags := gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression)
	jsonFlags := gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression)

	switch typed := value.(type) {
	case []byte:
		return typed, binaryFlags, nil
	case *[]byte:
		return *typed, binaryFlags, nil
	case string:
		return []byte(typed), stringFlags, nil
	case *string:
		return []byte(*typed), stringFlags, nil
	case json.RawMessage:
		return typed, jsonFlags, nil
	case *json.RawMessage:
		return *typed, jsonFlags, nil
	case *interface{}:
		// Unwrap one level of interface indirection and retry.
		return t.Encode(*typed)
	default:
		encoded, err := json.Marshal(value)
		if err != nil {
			return nil, 0, err
		}
		return encoded, jsonFlags, nil
	}
}

View File

@@ -1,11 +0,0 @@
package gocb
// Version returns a string representation of the current SDK version,
// taken from the package-level goCbVersionStr value.
func Version() string {
	return goCbVersionStr
}

// Identifier returns a string representation of the current SDK identifier:
// the SDK name ("gocb/") prefixed to the version string.
func Identifier() string {
	return "gocb/" + goCbVersionStr
}

View File

@@ -1,211 +0,0 @@
package gocb
import (
"bytes"
"encoding/json"
"net/url"
"strconv"
"time"
)
// ViewScanConsistency specifies the consistency required for a view query.
// Values start at 1 so the zero value means "not set".
type ViewScanConsistency uint

const (
	// ViewScanConsistencyNotBounded indicates that no special behaviour should be used.
	ViewScanConsistencyNotBounded ViewScanConsistency = iota + 1

	// ViewScanConsistencyRequestPlus indicates to update the index before querying it.
	ViewScanConsistencyRequestPlus

	// ViewScanConsistencyUpdateAfter indicates to update the index asynchronously after querying.
	ViewScanConsistencyUpdateAfter
)
// ViewOrdering specifies the ordering for the view queries results.
// Values start at 1 so the zero value means "not set".
type ViewOrdering uint

const (
	// ViewOrderingAscending indicates the query results should be sorted from lowest to highest.
	ViewOrderingAscending ViewOrdering = iota + 1

	// ViewOrderingDescending indicates the query results should be sorted from highest to lowest.
	ViewOrderingDescending
)
// ViewErrorMode specifies the behaviour of the query engine should an error occur during the gathering of
// view index results which would result in only partial results being available.
// Values start at 1 so the zero value means "not set".
type ViewErrorMode uint

const (
	// ViewErrorModeContinue indicates to continue gathering results on error.
	ViewErrorModeContinue ViewErrorMode = iota + 1

	// ViewErrorModeStop indicates to stop gathering results on error.
	ViewErrorModeStop
)
// ViewOptions represents the options available when executing view query.
type ViewOptions struct {
	// ScanConsistency controls index staleness (the "stale" parameter).
	ScanConsistency ViewScanConsistency
	// Skip and Limit page the result set.
	Skip  uint32
	Limit uint32
	// Order controls the sort direction of results.
	Order ViewOrdering
	// Reduce, Group and GroupLevel control the reduce phase of the view.
	Reduce     bool
	Group      bool
	GroupLevel uint32
	// Key/Keys restrict results to exact key matches.
	Key  interface{}
	Keys []interface{}
	// StartKey/EndKey (with InclusiveEnd) restrict results to a key range.
	StartKey     interface{}
	EndKey       interface{}
	InclusiveEnd bool
	// StartKeyDocID/EndKeyDocID further restrict the range by document ID.
	StartKeyDocID string
	EndKeyDocID   string
	// OnError controls whether the server continues past indexing errors.
	OnError ViewErrorMode
	// Debug requests debug output from the server.
	Debug bool

	// Raw provides a way to provide extra parameters in the request body for the query.
	Raw map[string]string

	// Namespace selects the design-document namespace (e.g. development vs
	// production) — see DesignDocumentNamespace.
	Namespace DesignDocumentNamespace

	Timeout       time.Duration
	RetryStrategy RetryStrategy

	// parentSpan is the tracing context to parent this operation's span on.
	parentSpan requestSpanContext
}
// toURLValues converts the view options into the query-string form used by
// the views REST API. It returns an error for unrecognized enum values.
//
// The previous version called options.Del(...) in several else branches;
// those were no-ops on a freshly created url.Values and have been removed.
func (opts *ViewOptions) toURLValues() (*url.Values, error) {
	options := &url.Values{}

	if opts.ScanConsistency != 0 {
		switch opts.ScanConsistency {
		case ViewScanConsistencyRequestPlus:
			options.Set("stale", "false")
		case ViewScanConsistencyNotBounded:
			options.Set("stale", "ok")
		case ViewScanConsistencyUpdateAfter:
			options.Set("stale", "update_after")
		default:
			return nil, makeInvalidArgumentsError("unexpected stale option")
		}
	}

	if opts.Skip != 0 {
		options.Set("skip", strconv.FormatUint(uint64(opts.Skip), 10))
	}

	if opts.Limit != 0 {
		options.Set("limit", strconv.FormatUint(uint64(opts.Limit), 10))
	}

	if opts.Order != 0 {
		switch opts.Order {
		case ViewOrderingAscending:
			options.Set("descending", "false")
		case ViewOrderingDescending:
			options.Set("descending", "true")
		default:
			return nil, makeInvalidArgumentsError("unexpected order option")
		}
	}

	// "reduce" is sent explicitly (defaulting to false) rather than omitted,
	// so the request does not rely on the server-side default.
	options.Set("reduce", "false")
	if opts.Reduce {
		options.Set("reduce", "true")

		// Grouping options are only meaningful for reduce views, so they are
		// only emitted (again with an explicit "group" default) in this branch.
		options.Set("group", "false")
		if opts.Group {
			options.Set("group", "true")
		}
		if opts.GroupLevel != 0 {
			options.Set("group_level", strconv.FormatUint(uint64(opts.GroupLevel), 10))
		}
	}

	if opts.Key != nil {
		jsonKey, err := opts.marshalJSON(opts.Key)
		if err != nil {
			return nil, err
		}
		options.Set("key", string(jsonKey))
	}

	if len(opts.Keys) > 0 {
		jsonKeys, err := opts.marshalJSON(opts.Keys)
		if err != nil {
			return nil, err
		}
		options.Set("keys", string(jsonKeys))
	}

	if opts.StartKey != nil {
		jsonStartKey, err := opts.marshalJSON(opts.StartKey)
		if err != nil {
			return nil, err
		}
		options.Set("startkey", string(jsonStartKey))
	}

	if opts.EndKey != nil {
		jsonEndKey, err := opts.marshalJSON(opts.EndKey)
		if err != nil {
			return nil, err
		}
		options.Set("endkey", string(jsonEndKey))
	}

	// inclusive_end only applies when a key range is in play.
	if opts.StartKey != nil || opts.EndKey != nil {
		if opts.InclusiveEnd {
			options.Set("inclusive_end", "true")
		} else {
			options.Set("inclusive_end", "false")
		}
	}

	if opts.StartKeyDocID != "" {
		options.Set("startkey_docid", opts.StartKeyDocID)
	}

	if opts.EndKeyDocID != "" {
		options.Set("endkey_docid", opts.EndKeyDocID)
	}

	if opts.OnError > 0 {
		switch opts.OnError {
		case ViewErrorModeContinue:
			options.Set("on_error", "continue")
		case ViewErrorModeStop:
			options.Set("on_error", "stop")
		default:
			return nil, makeInvalidArgumentsError("unexpected onerror option")
		}
	}

	if opts.Debug {
		options.Set("debug", "true")
	}

	// Raw entries are applied last and may override anything set above.
	if opts.Raw != nil {
		for k, v := range opts.Raw {
			options.Set(k, v)
		}
	}

	return options, nil
}
// marshalJSON encodes value without escaping HTML characters, so view keys
// are passed through verbatim in the query string. Note that
// json.Encoder.Encode appends a trailing newline to its output.
func (opts *ViewOptions) marshalJSON(value interface{}) ([]byte, error) {
	var out bytes.Buffer
	encoder := json.NewEncoder(&out)
	encoder.SetEscapeHTML(false)
	if err := encoder.Encode(value); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

View File

@@ -1,18 +0,0 @@
run:
modules-download-mode: readonly
tests: false
skip-files:
- logging.go # Logging has some utility functions that are useful to have around which get flagged up
linters:
enable:
- bodyclose
- golint
- gosec
- unconvert
linters-settings:
golint:
set-exit-status: true
min-confidence: 0.81
errcheck:
check-type-assertions: true
check-blank: true

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,24 +0,0 @@
devsetup:
go get github.com/golangci/golangci-lint/cmd/golangci-lint
go get github.com/vektra/mockery/.../
test:
go test ./...
fasttest:
go test -short ./...
cover:
go test -coverprofile=cover.out ./...
lint:
golangci-lint run -v
check: lint
go test -cover -race ./...
updatemocks:
mockery -name dispatcher -output . -testonly -inpkg
mockery -name tracerManager -output . -testonly -inpkg
mockery -name configManager -output . -testonly -inpkg
.PHONY: all test devsetup fasttest lint cover checkerrs checkfmt checkvet checkiea checkspell check updatemocks

View File

@@ -1,22 +0,0 @@
# Couchbase Go Core
This package provides the underlying Couchbase IO for the gocb project.
If you are looking for the Couchbase Go SDK, you are probably looking for
[gocb](https://github.com/couchbase/gocb).
## Branching Strategy
The gocbcore library maintains a branch for each previous major revision
of its API. These branches are introduced just prior to any API breaking
changes. Active work is performed on the master branch, with releases
being performed as tags. Work done on master that is not yet part of a
tagged release should be considered liable to change.
## License
Copyright 2017 Couchbase Inc.
Licensed under the Apache License, Version 2.0.
See
[LICENSE](https://github.com/couchbase/gocbcore/blob/master/LICENSE)
for further details.

Some files were not shown because too many files have changed in this diff Show More