diff --git a/command/base_predict_test.go b/command/base_predict_test.go index 95b52b8313e3..b756e2895074 100644 --- a/command/base_predict_test.go +++ b/command/base_predict_test.go @@ -353,6 +353,7 @@ func TestPredict_Plugins(t *testing.T) { "cert", "cf", "consul", + "couchbase-database-plugin", "elasticsearch-database-plugin", "gcp", "gcpkms", diff --git a/go.mod b/go.mod index cc4680c01caf..45646c8aa681 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,6 @@ require ( github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf github.com/aws/aws-sdk-go v1.30.27 github.com/bitly/go-hostpool v0.1.0 // indirect - github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 github.com/client9/misspell v0.3.4 github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c @@ -80,6 +79,7 @@ require ( github.com/hashicorp/vault-plugin-auth-kerberos v0.1.6 github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.0 github.com/hashicorp/vault-plugin-auth-oci v0.5.5 + github.com/hashicorp/vault-plugin-database-couchbase v0.1.0 github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4 github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.2 github.com/hashicorp/vault-plugin-secrets-ad v0.6.6 @@ -100,7 +100,7 @@ require ( github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f github.com/kr/pretty v0.2.0 github.com/kr/text v0.2.0 - github.com/lib/pq v1.2.0 + github.com/lib/pq v1.8.0 github.com/mattn/go-colorable v0.1.6 github.com/mholt/archiver v3.1.1+incompatible github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5 @@ -109,7 +109,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-testing-interface v1.0.0 github.com/mitchellh/gox v1.0.1 - github.com/mitchellh/mapstructure v1.3.2 + github.com/mitchellh/mapstructure v1.3.3 github.com/mitchellh/reflectwalk v1.0.1 github.com/mongodb/go-client-mongodb-atlas v0.1.2 
github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc diff --git a/go.sum b/go.sum index 996eb265a23f..8c529d87225c 100644 --- a/go.sum +++ b/go.sum @@ -88,6 +88,7 @@ github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITg github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.4.13 h1:Hmi80lzZuI/CaYmlJp/b+FjZdRZhKu9c2mDVqKlLWVs= github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk= @@ -193,6 +194,8 @@ github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVl github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc h1:lDK/G7OlwUnJW3O6nv/8M89bMupV6FuLK6FXmC3ueWc= github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe h1:PEmIrUvwG9Yyv+0WKZqjXfSFDeZjs/q15g0m08BYS9k= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod 
h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= @@ -213,6 +216,10 @@ github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQa github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/couchbase/gocb/v2 v2.1.4 h1:HRuVhqZpVNIck3FwzTxWh5TnmGXeTmSfjhxkjeradLg= +github.com/couchbase/gocb/v2 v2.1.4/go.mod h1:lESKM6wCEajrFVSZUewYuRzNtuNtnRey5wOfcZZsH90= +github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8= +github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -491,6 +498,8 @@ github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -536,6 +545,8 @@ github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.0 
h1:tt/kHMFB1qjp2b2ZRSI1 github.com/hashicorp/vault-plugin-auth-kubernetes v0.7.0/go.mod h1:2c/k3nsoGPKV+zpAWCiajt4e66vncEq8Li/eKLqErAc= github.com/hashicorp/vault-plugin-auth-oci v0.5.5 h1:nIP8g+VZd2V+LY/D5omWhLSnhHuogIJx7Bz6JyLt628= github.com/hashicorp/vault-plugin-auth-oci v0.5.5/go.mod h1:Cn5cjR279Y+snw8LTaiLTko3KGrbigRbsQPOd2D5xDw= +github.com/hashicorp/vault-plugin-database-couchbase v0.1.0 h1:P/ji+KVmIXDyF3dM2PVb5wUpNMeEieFqJpj9derJlPg= +github.com/hashicorp/vault-plugin-database-couchbase v0.1.0/go.mod h1:N4esW48+x1CClz6unRkGZGUBBR87iMMLbpHpnkQDiXg= github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4 h1:YE4qndazWmYGpVOoZI7nDGG+gwTZKzL1Ou4WZQ+Tdxk= github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4/go.mod h1:QjGrrxcRXv/4XkEZAlM0VMZEa3uxKAICFqDj27FP/48= github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.2 h1:tSToR3JRARqQkV9B10rk4VlZe2Sr9fOdhEP2NdxPo0I= @@ -624,6 +635,8 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -638,6 +651,8 @@ github.com/lestrrat-go/jwx v0.9.0/go.mod h1:iEoxlYfZjvoGpuWwxUz+eR5e6KTJGsaRcy/Y github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= @@ -687,6 +702,8 @@ github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQz github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.0.0 h1:ATSdz4NWrmWPOF1CeCBU4sMCno2hgqdbSrRPFWQSVZI= github.com/mitchellh/pointerstructure v1.0.0/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -845,6 +862,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go index 6148d2cd41d2..0fa066248c87 100644 --- a/helper/builtinplugins/registry.go +++ b/helper/builtinplugins/registry.go @@ -10,6 +10,7 @@ import ( credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" credKube "github.com/hashicorp/vault-plugin-auth-kubernetes" credOCI "github.com/hashicorp/vault-plugin-auth-oci" + dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase" dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch" dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas" credAppId "github.com/hashicorp/vault/builtin/credential/app-id" @@ -98,15 +99,16 @@ func newRegistry() *registry { "mysql-rds-database-plugin": dbMysql.New(credsutil.NoneLength, dbMysql.LegacyMetadataLen, dbMysql.LegacyUsernameLen), "mysql-legacy-database-plugin": dbMysql.New(credsutil.NoneLength, dbMysql.LegacyMetadataLen, dbMysql.LegacyUsernameLen), - "postgresql-database-plugin": dbPostgres.New, - "redshift-database-plugin": dbRedshift.New(true), - "mssql-database-plugin": dbMssql.New, "cassandra-database-plugin": dbCass.New, - "mongodb-database-plugin": dbMongo.New, - "mongodbatlas-database-plugin": dbMongoAtlas.New, + "couchbase-database-plugin": dbCouchbase.New, + "elasticsearch-database-plugin": dbElastic.New, "hana-database-plugin": dbHana.New, "influxdb-database-plugin": dbInflux.New, - "elasticsearch-database-plugin": dbElastic.New, + "mongodb-database-plugin": dbMongo.New, + "mongodbatlas-database-plugin": dbMongoAtlas.New, + 
"mssql-database-plugin": dbMssql.New, + "postgresql-database-plugin": dbPostgres.New, + "redshift-database-plugin": dbRedshift.New(true), }, logicalBackends: map[string]logical.Factory{ "ad": logicalAd.Factory, diff --git a/scripts/gen_openapi.sh b/scripts/gen_openapi.sh index 1a85e4979bf1..355f30827de8 100755 --- a/scripts/gen_openapi.sh +++ b/scripts/gen_openapi.sh @@ -51,6 +51,7 @@ vault secrets enable aws vault secrets enable azure vault secrets enable cassandra vault secrets enable consul +vault secrets enable couchbase vault secrets enable database vault secrets enable gcp vault secrets enable gcpkms diff --git a/vault/testing.go b/vault/testing.go index 32ed1db61013..05dfe5d24412 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -2022,14 +2022,16 @@ func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string { "mysql-aurora-database-plugin", "mysql-rds-database-plugin", "mysql-legacy-database-plugin", - "postgresql-database-plugin", - "elasticsearch-database-plugin", - "mssql-database-plugin", + "cassandra-database-plugin", - "mongodb-database-plugin", - "mongodbatlas-database-plugin", + "couchbase-database-plugin", + "elasticsearch-database-plugin", "hana-database-plugin", "influxdb-database-plugin", + "mongodb-database-plugin", + "mongodbatlas-database-plugin", + "mssql-database-plugin", + "postgresql-database-plugin", "redshift-database-plugin", } } diff --git a/vendor/github.com/couchbase/gocb/v2/.gitignore b/vendor/github.com/couchbase/gocb/v2/.gitignore new file mode 100644 index 000000000000..81218fdfc6dd --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/.gitignore @@ -0,0 +1,3 @@ +*~ + +.project \ No newline at end of file diff --git a/vendor/github.com/couchbase/gocb/v2/.gitmodules b/vendor/github.com/couchbase/gocb/v2/.gitmodules new file mode 100644 index 000000000000..c14d8fd7048a --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/.gitmodules @@ -0,0 +1,3 @@ +[submodule "testdata/sdk-testcases"] + path = 
testdata/sdk-testcases + url = https://github.com/couchbaselabs/sdk-testcases diff --git a/vendor/github.com/couchbase/gocb/v2/.golangci.yml b/vendor/github.com/couchbase/gocb/v2/.golangci.yml new file mode 100644 index 000000000000..e8f774aecf01 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/.golangci.yml @@ -0,0 +1,18 @@ +run: + modules-download-mode: readonly + tests: false + skip-files: + - logging.go # Logging has some utility functions that are useful to have around which get flagged up +linters: + enable: + - bodyclose + - golint + - gosec + - unconvert +linters-settings: + golint: + set-exit-status: true + min-confidence: 0.81 + errcheck: + check-type-assertions: true + check-blank: true diff --git a/vendor/github.com/couchbase/gocb/v2/LICENSE b/vendor/github.com/couchbase/gocb/v2/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/couchbase/gocb/v2/Makefile b/vendor/github.com/couchbase/gocb/v2/Makefile new file mode 100644 index 000000000000..a21c8dde7426 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/Makefile @@ -0,0 +1,39 @@ +devsetup: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + go get github.com/vektra/mockery/.../ + git submodule update --remote --init --recursive + +test: + go test ./ +fasttest: + go test -short ./ + +cover: + go test -coverprofile=cover.out ./ + +lint: + golangci-lint run -v + +check: lint + go test -short -cover -race ./ + +bench: + go test -bench=. -run=none --disable-logger=true + +updatetestcases: + git submodule update --remote --init --recursive + +updatemocks: + mockery -name connectionManager -output . -testonly -inpkg + mockery -name kvProvider -output . -testonly -inpkg + mockery -name httpProvider -output . -testonly -inpkg + mockery -name diagnosticsProvider -output . -testonly -inpkg + mockery -name mgmtProvider -output . -testonly -inpkg + mockery -name analyticsProvider -output . -testonly -inpkg + mockery -name queryProvider -output . -testonly -inpkg + mockery -name searchProvider -output . -testonly -inpkg + mockery -name viewProvider -output . -testonly -inpkg + mockery -name waitUntilReadyProvider -output . 
-testonly -inpkg + # pendingOp is manually mocked + +.PHONY: all test devsetup fasttest lint cover check bench updatetestcases updatemocks diff --git a/vendor/github.com/couchbase/gocb/v2/README.md b/vendor/github.com/couchbase/gocb/v2/README.md new file mode 100644 index 000000000000..0fafa2561954 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/README.md @@ -0,0 +1,53 @@ +[![GoDoc](https://godoc.org/github.com/couchbase/gocb?status.png)](https://godoc.org/github.com/couchbase/gocb) + +# Couchbase Go Client + +This is the official Couchbase Go SDK. If you are looking for our +previous unofficial prototype Go client library, please see: +[http://www.github.com/couchbase/go-couchbase](http://www.github.com/couchbase/go-couchbase). + +The Go SDK library allows you to connect to a Couchbase cluster from +Go. It is written in pure Go, and uses the included gocbcore library to +handle communicating to the cluster over the Couchbase binary +protocol. + + +## Useful Links + +### Source +The project source is hosted at [http://github.com/couchbase/gocb](http://github.com/couchbase/gocb). + +### Documentation +You can explore our API reference through godoc at [https://godoc.org/github.com/couchbase/gocb](https://godoc.org/github.com/couchbase/gocb). + +You can also find documentation for the Go SDK at the Couchbase [Developer Portal](https://developer.couchbase.com/documentation/server/current/sdk/go/start-using-sdk.html). + +### Bug Tracker +Issues are tracked on Couchbase's public [issues.couchbase.com](http://www.couchbase.com/issues/browse/GOCBC). +Contact [the site admins](https://issues.couchbase.com/secure/ContactAdministrators!default.jspa) +regarding login or other problems at issues.couchbase.com (officially) or ask +around in [couchbase/discuss on gitter.im](https://gitter.im/couchbase/discuss) +(unofficially). 
+ + +## Installing + +To install the latest stable version, run: +```bash +go get github.com/couchbase/gocb/v2 +``` + +To install the latest developer version, run: +```bash +go get github.com/couchbase/gocb +``` + + +## License +Copyright 2016 Couchbase Inc. + +Licensed under the Apache License, Version 2.0. + +See +[LICENSE](https://github.com/couchbase/gocb/blob/master/LICENSE) +for further details. diff --git a/vendor/github.com/couchbase/gocb/v2/analyticsquery_options.go b/vendor/github.com/couchbase/gocb/v2/analyticsquery_options.go new file mode 100644 index 000000000000..ba6a3059e5f2 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/analyticsquery_options.go @@ -0,0 +1,89 @@ +package gocb + +import ( + "strings" + "time" + + "github.com/google/uuid" +) + +// AnalyticsScanConsistency indicates the level of data consistency desired for an analytics query. +type AnalyticsScanConsistency uint + +const ( + // AnalyticsScanConsistencyNotBounded indicates no data consistency is required. + AnalyticsScanConsistencyNotBounded AnalyticsScanConsistency = iota + 1 + // AnalyticsScanConsistencyRequestPlus indicates that request-level data consistency is required. + AnalyticsScanConsistencyRequestPlus +) + +// AnalyticsOptions is the set of options available to an Analytics query. +type AnalyticsOptions struct { + // ClientContextID provides a unique ID for this query which can be used matching up requests between connectionManager and + // server. If not provided will be assigned a uuid value. + ClientContextID string + + // Priority sets whether this query should be assigned as high priority by the analytics engine. + Priority bool + PositionalParameters []interface{} + NamedParameters map[string]interface{} + Readonly bool + ScanConsistency AnalyticsScanConsistency + + // Raw provides a way to provide extra parameters in the request body for the query. 
+ Raw map[string]interface{} + + Timeout time.Duration + RetryStrategy RetryStrategy + + parentSpan requestSpanContext +} + +func (opts *AnalyticsOptions) toMap() (map[string]interface{}, error) { + execOpts := make(map[string]interface{}) + + if opts.ClientContextID == "" { + execOpts["client_context_id"] = uuid.New().String() + } else { + execOpts["client_context_id"] = opts.ClientContextID + } + + if opts.ScanConsistency != 0 { + if opts.ScanConsistency == AnalyticsScanConsistencyNotBounded { + execOpts["scan_consistency"] = "not_bounded" + } else if opts.ScanConsistency == AnalyticsScanConsistencyRequestPlus { + execOpts["scan_consistency"] = "request_plus" + } else { + return nil, makeInvalidArgumentsError("unexpected consistency option") + } + } + + if opts.PositionalParameters != nil && opts.NamedParameters != nil { + return nil, makeInvalidArgumentsError("positional and named parameters must be used exclusively") + } + + if opts.PositionalParameters != nil { + execOpts["args"] = opts.PositionalParameters + } + + if opts.NamedParameters != nil { + for key, value := range opts.NamedParameters { + if !strings.HasPrefix(key, "$") { + key = "$" + key + } + execOpts[key] = value + } + } + + if opts.Readonly { + execOpts["readonly"] = true + } + + if opts.Raw != nil { + for k, v := range opts.Raw { + execOpts[k] = v + } + } + + return execOpts, nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/asyncopmanager.go b/vendor/github.com/couchbase/gocb/v2/asyncopmanager.go new file mode 100644 index 000000000000..82534c613b75 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/asyncopmanager.go @@ -0,0 +1,36 @@ +package gocb + +import ( + gocbcore "github.com/couchbase/gocbcore/v9" +) + +type asyncOpManager struct { + signal chan struct{} + + wasResolved bool +} + +func (m *asyncOpManager) Reject() { + m.signal <- struct{}{} +} + +func (m *asyncOpManager) Resolve() { + m.wasResolved = true + m.signal <- struct{}{} +} + +func (m *asyncOpManager) Wait(op 
gocbcore.PendingOp, err error) error { + if err != nil { + return err + } + + <-m.signal + + return nil +} + +func newAsyncOpManager() *asyncOpManager { + return &asyncOpManager{ + signal: make(chan struct{}, 1), + } +} diff --git a/vendor/github.com/couchbase/gocb/v2/auth.go b/vendor/github.com/couchbase/gocb/v2/auth.go new file mode 100644 index 000000000000..f33c6c034061 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/auth.go @@ -0,0 +1,143 @@ +package gocb + +import ( + "crypto/tls" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +// UserPassPair represents a username and password pair. +// VOLATILE: This API is subject to change at any time. +type UserPassPair gocbcore.UserPassPair + +// AuthCredsRequest encapsulates the data for a credential request +// from the new Authenticator interface. +// VOLATILE: This API is subject to change at any time. +type AuthCredsRequest struct { + Service ServiceType + Endpoint string +} + +// AuthCertRequest encapsulates the data for a certificate request +// from the new Authenticator interface. +// VOLATILE: This API is subject to change at any time. +type AuthCertRequest struct { + Service ServiceType + Endpoint string +} + +// Authenticator provides an interface to authenticate to each service. Note that +// only authenticators implemented via the SDK are stable. +type Authenticator interface { + // VOLATILE: This API is subject to change at any time. + SupportsTLS() bool + + // VOLATILE: This API is subject to change at any time. + SupportsNonTLS() bool + + // VOLATILE: This API is subject to change at any time. + Certificate(req AuthCertRequest) (*tls.Certificate, error) + + // VOLATILE: This API is subject to change at any time. + Credentials(req AuthCredsRequest) ([]UserPassPair, error) +} + +// PasswordAuthenticator implements an Authenticator which uses an RBAC username and password. 
+type PasswordAuthenticator struct { + Username string + Password string +} + +// SupportsTLS returns whether this authenticator can authenticate a TLS connection. +// VOLATILE: This API is subject to change at any time. +func (ra PasswordAuthenticator) SupportsTLS() bool { + return true +} + +// SupportsNonTLS returns whether this authenticator can authenticate a non-TLS connection. +// VOLATILE: This API is subject to change at any time. +func (ra PasswordAuthenticator) SupportsNonTLS() bool { + return true +} + +// Certificate returns the certificate to use when connecting to a specified server. +// VOLATILE: This API is subject to change at any time. +func (ra PasswordAuthenticator) Certificate(req AuthCertRequest) (*tls.Certificate, error) { + return nil, nil +} + +// Credentials returns the credentials for a particular service. +// VOLATILE: This API is subject to change at any time. +func (ra PasswordAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) { + return []UserPassPair{{ + Username: ra.Username, + Password: ra.Password, + }}, nil +} + +// CertificateAuthenticator implements an Authenticator which can be used with certificate authentication. +type CertificateAuthenticator struct { + ClientCertificate *tls.Certificate +} + +// SupportsTLS returns whether this authenticator can authenticate a TLS connection. +// VOLATILE: This API is subject to change at any time. +func (ca CertificateAuthenticator) SupportsTLS() bool { + return true +} + +// SupportsNonTLS returns whether this authenticator can authenticate a non-TLS connection. +// VOLATILE: This API is subject to change at any time. +func (ca CertificateAuthenticator) SupportsNonTLS() bool { + return false +} + +// Certificate returns the certificate to use when connecting to a specified server. +// VOLATILE: This API is subject to change at any time. 
+func (ca CertificateAuthenticator) Certificate(req AuthCertRequest) (*tls.Certificate, error) { + return ca.ClientCertificate, nil +} + +// Credentials returns the credentials for a particular service. +// VOLATILE: This API is subject to change at any time. +func (ca CertificateAuthenticator) Credentials(req AuthCredsRequest) ([]UserPassPair, error) { + return []UserPassPair{{ + Username: "", + Password: "", + }}, nil +} + +type coreAuthWrapper struct { + auth Authenticator +} + +func (auth *coreAuthWrapper) SupportsTLS() bool { + return auth.auth.SupportsTLS() +} + +func (auth *coreAuthWrapper) SupportsNonTLS() bool { + return auth.auth.SupportsNonTLS() +} + +func (auth *coreAuthWrapper) Certificate(req gocbcore.AuthCertRequest) (*tls.Certificate, error) { + return auth.auth.Certificate(AuthCertRequest{ + Service: ServiceType(req.Service), + Endpoint: req.Endpoint, + }) +} + +func (auth *coreAuthWrapper) Credentials(req gocbcore.AuthCredsRequest) ([]gocbcore.UserPassPair, error) { + creds, err := auth.auth.Credentials(AuthCredsRequest{ + Service: ServiceType(req.Service), + Endpoint: req.Endpoint, + }) + if err != nil { + return nil, err + } + + coreCreds := make([]gocbcore.UserPassPair, len(creds)) + for credIdx, userPass := range creds { + coreCreds[credIdx] = gocbcore.UserPassPair(userPass) + } + return coreCreds, nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/bucket.go b/vendor/github.com/couchbase/gocb/v2/bucket.go new file mode 100644 index 000000000000..7c14e62dc24b --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/bucket.go @@ -0,0 +1,153 @@ +package gocb + +import ( + "time" + + "github.com/couchbase/gocbcore/v9" +) + +// Bucket represents a single bucket within a cluster. 
+type Bucket struct { + bucketName string + + timeoutsConfig TimeoutsConfig + + transcoder Transcoder + retryStrategyWrapper *retryStrategyWrapper + tracer requestTracer + + useServerDurations bool + useMutationTokens bool + + bootstrapError error + connectionManager connectionManager +} + +func newBucket(c *Cluster, bucketName string) *Bucket { + return &Bucket{ + bucketName: bucketName, + + timeoutsConfig: c.timeoutsConfig, + + transcoder: c.transcoder, + + retryStrategyWrapper: c.retryStrategyWrapper, + + tracer: c.tracer, + + useServerDurations: c.useServerDurations, + useMutationTokens: c.useMutationTokens, + + connectionManager: c.connectionManager, + } +} + +func (b *Bucket) setBootstrapError(err error) { + b.bootstrapError = err +} + +func (b *Bucket) getKvProvider() (kvProvider, error) { + if b.bootstrapError != nil { + return nil, b.bootstrapError + } + + agent, err := b.connectionManager.getKvProvider(b.bucketName) + if err != nil { + return nil, err + } + + return agent, nil +} + +// Name returns the name of the bucket. +func (b *Bucket) Name() string { + return b.bucketName +} + +// Scope returns an instance of a Scope. +// VOLATILE: This API is subject to change at any time. +func (b *Bucket) Scope(scopeName string) *Scope { + return newScope(b, scopeName) +} + +// DefaultScope returns an instance of the default scope. +// VOLATILE: This API is subject to change at any time. +func (b *Bucket) DefaultScope() *Scope { + return b.Scope("_default") +} + +// Collection returns an instance of a collection from within the default scope. +// VOLATILE: This API is subject to change at any time. +func (b *Bucket) Collection(collectionName string) *Collection { + return b.DefaultScope().Collection(collectionName) +} + +// DefaultCollection returns an instance of the default collection. +func (b *Bucket) DefaultCollection() *Collection { + return b.DefaultScope().Collection("_default") +} + +// ViewIndexes returns a ViewIndexManager instance for managing views. 
+func (b *Bucket) ViewIndexes() *ViewIndexManager { + return &ViewIndexManager{ + mgmtProvider: b, + bucketName: b.Name(), + tracer: b.tracer, + } +} + +// Collections provides functions for managing collections. +func (b *Bucket) Collections() *CollectionManager { + // TODO: return error for unsupported collections + return &CollectionManager{ + mgmtProvider: b, + bucketName: b.Name(), + tracer: b.tracer, + } +} + +// WaitUntilReady will wait for the bucket object to be ready for use. +// At present this will wait until memd connections have been established with the server and are ready +// to be used before performing a ping against the specified services (except KeyValue) which also +// exist in the cluster map. +// If no services are specified then will wait until KeyValue is ready. +// Valid service types are: ServiceTypeKeyValue, ServiceTypeManagement, ServiceTypeQuery, ServiceTypeSearch, +// ServiceTypeAnalytics, ServiceTypeViews. +func (b *Bucket) WaitUntilReady(timeout time.Duration, opts *WaitUntilReadyOptions) error { + if opts == nil { + opts = &WaitUntilReadyOptions{} + } + + if b.bootstrapError != nil { + return b.bootstrapError + } + + provider, err := b.connectionManager.getWaitUntilReadyProvider(b.bucketName) + if err != nil { + return err + } + + desiredState := opts.DesiredState + if desiredState == 0 { + desiredState = ClusterStateOnline + } + + services := opts.ServiceTypes + gocbcoreServices := make([]gocbcore.ServiceType, len(services)) + for i, svc := range services { + gocbcoreServices[i] = gocbcore.ServiceType(svc) + } + + err = provider.WaitUntilReady( + time.Now().Add(timeout), + gocbcore.WaitUntilReadyOptions{ + DesiredState: gocbcore.ClusterState(desiredState), + ServiceTypes: gocbcoreServices, + }, + ) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/bucket_collectionsmgr.go b/vendor/github.com/couchbase/gocb/v2/bucket_collectionsmgr.go new file mode 100644 index 
000000000000..530e8fc69598 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/bucket_collectionsmgr.go @@ -0,0 +1,389 @@ +package gocb + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "strings" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/couchbase/gocbcore/v9" +) + +// CollectionSpec describes the specification of a collection. +type CollectionSpec struct { + Name string + ScopeName string + MaxExpiry time.Duration +} + +// ScopeSpec describes the specification of a scope. +type ScopeSpec struct { + Name string + Collections []CollectionSpec +} + +// These 3 types are temporary. They are necessary for now as the server beta was released with ns_server returning +// a different jsonManifest format to what it will return in the future. +type jsonManifest struct { + UID uint64 `json:"uid"` + Scopes map[string]jsonManifestScope `json:"scopes"` +} + +type jsonManifestScope struct { + UID uint32 `json:"uid"` + Collections map[string]jsonManifestCollection `json:"collections"` +} + +type jsonManifestCollection struct { + UID uint32 `json:"uid"` +} + +// CollectionManager provides methods for performing collections management. 
+type CollectionManager struct { + mgmtProvider mgmtProvider + bucketName string + tracer requestTracer +} + +func (cm *CollectionManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + logDebugf("failed to read http body: %s", err) + return nil + } + + errText := strings.ToLower(string(b)) + + if resp.StatusCode == 404 { + if strings.Contains(errText, "not found") && strings.Contains(errText, "scope") { + return makeGenericMgmtError(ErrScopeNotFound, req, resp) + } else if strings.Contains(errText, "not found") && strings.Contains(errText, "scope") { + return makeGenericMgmtError(ErrScopeNotFound, req, resp) + } + } + + if strings.Contains(errText, "already exists") && strings.Contains(errText, "collection") { + return makeGenericMgmtError(ErrCollectionExists, req, resp) + } else if strings.Contains(errText, "already exists") && strings.Contains(errText, "scope") { + return makeGenericMgmtError(ErrScopeExists, req, resp) + } + + return makeGenericMgmtError(errors.New(errText), req, resp) +} + +// GetAllScopesOptions is the set of options available to the GetAllScopes operation. +type GetAllScopesOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllScopes gets all scopes from the bucket. +func (cm *CollectionManager) GetAllScopes(opts *GetAllScopesOptions) ([]ScopeSpec, error) { + if opts == nil { + opts = &GetAllScopesOptions{} + } + + span := cm.tracer.StartSpan("GetAllScopes", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s/collections", cm.bucketName), + Method: "GET", + RetryStrategy: opts.RetryStrategy, + IsIdempotent: true, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := cm.mgmtProvider.executeMgmtRequest(req) + if err != nil { + colErr := cm.tryParseErrorMessage(&req, resp) + if colErr != nil { + return nil, colErr + } + return nil, makeMgmtBadStatusError("failed to get all scopes", &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + return nil, makeMgmtBadStatusError("failed to get all scopes", &req, resp) + } + + var scopes []ScopeSpec + var mfest gocbcore.Manifest + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&mfest) + if err == nil { + for _, scope := range mfest.Scopes { + var collections []CollectionSpec + for _, col := range scope.Collections { + collections = append(collections, CollectionSpec{ + Name: col.Name, + ScopeName: scope.Name, + }) + } + scopes = append(scopes, ScopeSpec{ + Name: scope.Name, + Collections: collections, + }) + } + } else { + // Temporary support for older server version + var oldMfest jsonManifest + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&oldMfest) + if err != nil { + return nil, err + } + + for scopeName, scope := range oldMfest.Scopes { + var collections []CollectionSpec + for colName := range scope.Collections { + collections = append(collections, CollectionSpec{ + Name: colName, + ScopeName: scopeName, + }) + } + scopes = append(scopes, ScopeSpec{ + Name: scopeName, + Collections: collections, + }) + } + } + + return scopes, nil +} + +// CreateCollectionOptions is the set of options available to the CreateCollection operation. 
+type CreateCollectionOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// CreateCollection creates a new collection on the bucket. +func (cm *CollectionManager) CreateCollection(spec CollectionSpec, opts *CreateCollectionOptions) error { + if spec.Name == "" { + return makeInvalidArgumentsError("collection name cannot be empty") + } + + if spec.ScopeName == "" { + return makeInvalidArgumentsError("scope name cannot be empty") + } + + if opts == nil { + opts = &CreateCollectionOptions{} + } + + span := cm.tracer.StartSpan("CreateCollection", nil). + SetTag("couchbase.service", "mgmt") + defer span.Finish() + + posts := url.Values{} + posts.Add("name", spec.Name) + + if spec.MaxExpiry > 0 { + posts.Add("maxTTL", fmt.Sprintf("%d", int(spec.MaxExpiry.Seconds()))) + } + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s/collections/%s", cm.bucketName, spec.ScopeName), + Method: "POST", + Body: []byte(posts.Encode()), + ContentType: "application/x-www-form-urlencoded", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := cm.mgmtProvider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + colErr := cm.tryParseErrorMessage(&req, resp) + if colErr != nil { + return colErr + } + return makeMgmtBadStatusError("failed to create collection", &req, resp) + } + + err = resp.Body.Close() + if err != nil { + logDebugf("Failed to close socket (%s)", err) + } + + return nil +} + +// DropCollectionOptions is the set of options available to the DropCollection operation. +type DropCollectionOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropCollection removes a collection. 
+func (cm *CollectionManager) DropCollection(spec CollectionSpec, opts *DropCollectionOptions) error { + if spec.Name == "" { + return makeInvalidArgumentsError("collection name cannot be empty") + } + + if spec.ScopeName == "" { + return makeInvalidArgumentsError("scope name cannot be empty") + } + + if opts == nil { + opts = &DropCollectionOptions{} + } + + span := cm.tracer.StartSpan("DropCollection", nil). + SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s/collections/%s/%s", cm.bucketName, spec.ScopeName, spec.Name), + Method: "DELETE", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := cm.mgmtProvider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + colErr := cm.tryParseErrorMessage(&req, resp) + if colErr != nil { + return colErr + } + return makeMgmtBadStatusError("failed to drop collection", &req, resp) + } + + err = resp.Body.Close() + if err != nil { + logDebugf("Failed to close socket (%s)", err) + } + + return nil +} + +// CreateScopeOptions is the set of options available to the CreateScope operation. +type CreateScopeOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// CreateScope creates a new scope on the bucket. +func (cm *CollectionManager) CreateScope(scopeName string, opts *CreateScopeOptions) error { + if scopeName == "" { + return makeInvalidArgumentsError("scope name cannot be empty") + } + + if opts == nil { + opts = &CreateScopeOptions{} + } + + span := cm.tracer.StartSpan("CreateScope", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + posts := url.Values{} + posts.Add("name", scopeName) + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s/collections", cm.bucketName), + Method: "POST", + Body: []byte(posts.Encode()), + ContentType: "application/x-www-form-urlencoded", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := cm.mgmtProvider.executeMgmtRequest(req) + if err != nil { + return err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + colErr := cm.tryParseErrorMessage(&req, resp) + if colErr != nil { + return colErr + } + return makeMgmtBadStatusError("failed to create scope", &req, resp) + } + + err = resp.Body.Close() + if err != nil { + logDebugf("Failed to close socket (%s)", err) + } + + return nil +} + +// DropScopeOptions is the set of options available to the DropScope operation. +type DropScopeOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropScope removes a scope. +func (cm *CollectionManager) DropScope(scopeName string, opts *DropScopeOptions) error { + if opts == nil { + opts = &DropScopeOptions{} + } + + span := cm.tracer.StartSpan("DropScope", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s/collections/%s", cm.bucketName, scopeName), + Method: "DELETE", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := cm.mgmtProvider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + colErr := cm.tryParseErrorMessage(&req, resp) + if colErr != nil { + return colErr + } + return makeMgmtBadStatusError("failed to drop scope", &req, resp) + } + + err = resp.Body.Close() + if err != nil { + logDebugf("Failed to close socket (%s)", err) + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/bucket_internal.go b/vendor/github.com/couchbase/gocb/v2/bucket_internal.go new file mode 100644 index 000000000000..aedd707c793d --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/bucket_internal.go @@ -0,0 +1,20 @@ +package gocb + +import gocbcore "github.com/couchbase/gocbcore/v9" + +// InternalBucket is used for internal functionality. +// Internal: This should never be used and is not supported. +type InternalBucket struct { + bucket *Bucket +} + +// Internal returns a CollectionInternal. +// Internal: This should never be used and is not supported. +func (b *Bucket) Internal() *InternalBucket { + return &InternalBucket{bucket: b} +} + +// IORouter returns the collection's internal core router. 
+func (ib *InternalBucket) IORouter() (*gocbcore.Agent, error) { + return ib.bucket.connectionManager.connection(ib.bucket.Name()) +} diff --git a/vendor/github.com/couchbase/gocb/v2/bucket_ping.go b/vendor/github.com/couchbase/gocb/v2/bucket_ping.go new file mode 100644 index 000000000000..c2b89c13db22 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/bucket_ping.go @@ -0,0 +1,95 @@ +package gocb + +import ( + "encoding/json" + "time" +) + +// EndpointPingReport represents a single entry in a ping report. +type EndpointPingReport struct { + ID string + Local string + Remote string + State PingState + Error string + Namespace string + Latency time.Duration +} + +// PingResult encapsulates the details from a executed ping operation. +type PingResult struct { + ID string + Services map[ServiceType][]EndpointPingReport + + sdk string +} + +type jsonEndpointPingReport struct { + ID string `json:"id,omitempty"` + Local string `json:"local,omitempty"` + Remote string `json:"remote,omitempty"` + State string `json:"state,omitempty"` + Error string `json:"error,omitempty"` + Namespace string `json:"namespace,omitempty"` + LatencyUs uint64 `json:"latency_us"` +} + +type jsonPingReport struct { + Version uint16 `json:"version"` + SDK string `json:"sdk,omitempty"` + ID string `json:"id,omitempty"` + Services map[string][]jsonEndpointPingReport `json:"services,omitempty"` +} + +// MarshalJSON generates a JSON representation of this ping report. 
+func (report *PingResult) MarshalJSON() ([]byte, error) { + jsonReport := jsonPingReport{ + Version: 2, + SDK: report.sdk, + ID: report.ID, + Services: make(map[string][]jsonEndpointPingReport), + } + + for serviceType, serviceInfo := range report.Services { + serviceStr := serviceTypeToString(serviceType) + if _, ok := jsonReport.Services[serviceStr]; !ok { + jsonReport.Services[serviceStr] = make([]jsonEndpointPingReport, 0) + } + + for _, service := range serviceInfo { + jsonReport.Services[serviceStr] = append(jsonReport.Services[serviceStr], jsonEndpointPingReport{ + ID: service.ID, + Local: service.Local, + Remote: service.Remote, + State: pingStateToString(service.State), + Error: service.Error, + Namespace: service.Namespace, + LatencyUs: uint64(service.Latency / time.Nanosecond), + }) + } + } + + return json.Marshal(&jsonReport) +} + +// PingOptions are the options available to the Ping operation. +type PingOptions struct { + ServiceTypes []ServiceType + ReportID string + Timeout time.Duration +} + +// Ping will ping a list of services and verify they are active and +// responding in an acceptable period of time. +func (b *Bucket) Ping(opts *PingOptions) (*PingResult, error) { + if opts == nil { + opts = &PingOptions{} + } + + provider, err := b.connectionManager.getDiagnosticsProvider(b.bucketName) + if err != nil { + return nil, err + } + + return ping(provider, opts, b.timeoutsConfig) +} diff --git a/vendor/github.com/couchbase/gocb/v2/bucket_viewindexes.go b/vendor/github.com/couchbase/gocb/v2/bucket_viewindexes.go new file mode 100644 index 000000000000..bf0caa56dc41 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/bucket_viewindexes.go @@ -0,0 +1,460 @@ +package gocb + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/pkg/errors" +) + +// DesignDocumentNamespace represents which namespace a design document resides in. 
+type DesignDocumentNamespace uint + +const ( + // DesignDocumentNamespaceProduction means that a design document resides in the production namespace. + DesignDocumentNamespaceProduction DesignDocumentNamespace = iota + + // DesignDocumentNamespaceDevelopment means that a design document resides in the development namespace. + DesignDocumentNamespaceDevelopment +) + +// View represents a Couchbase view within a design document. +type jsonView struct { + Map string `json:"map,omitempty"` + Reduce string `json:"reduce,omitempty"` +} + +// DesignDocument represents a Couchbase design document containing multiple views. +type jsonDesignDocument struct { + Views map[string]jsonView `json:"views,omitempty"` +} + +// View represents a Couchbase view within a design document. +type View struct { + Map string + Reduce string +} + +func (v *View) fromData(data jsonView) error { + v.Map = data.Map + v.Reduce = data.Reduce + + return nil +} + +func (v *View) toData() (jsonView, error) { + var data jsonView + + data.Map = v.Map + data.Reduce = v.Reduce + + return data, nil +} + +// DesignDocument represents a Couchbase design document containing multiple views. 
+type DesignDocument struct { + Name string + Views map[string]View +} + +func (dd *DesignDocument) fromData(data jsonDesignDocument, name string) error { + dd.Name = name + + views := make(map[string]View) + for viewName, viewData := range data.Views { + var view View + err := view.fromData(viewData) + if err != nil { + return err + } + + views[viewName] = view + } + dd.Views = views + + return nil +} + +func (dd *DesignDocument) toData() (jsonDesignDocument, string, error) { + var data jsonDesignDocument + + views := make(map[string]jsonView) + for viewName, view := range dd.Views { + viewData, err := view.toData() + if err != nil { + return jsonDesignDocument{}, "", err + } + + views[viewName] = viewData + } + data.Views = views + + return data, dd.Name, nil +} + +// ViewIndexManager provides methods for performing View management. +type ViewIndexManager struct { + mgmtProvider mgmtProvider + bucketName string + + tracer requestTracer +} + +func (vm *ViewIndexManager) tryParseErrorMessage(req mgmtRequest, resp *mgmtResponse) error { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + logDebugf("Failed to read view index manager response body: %s", err) + return nil + } + + if resp.StatusCode == 404 { + if strings.Contains(strings.ToLower(string(b)), "not_found") { + return makeGenericMgmtError(ErrDesignDocumentNotFound, &req, resp) + } + + return makeGenericMgmtError(errors.New(string(b)), &req, resp) + } + + var mgrErr bucketMgrErrorResp + err = json.Unmarshal(b, &mgrErr) + if err != nil { + logDebugf("Failed to unmarshal error body: %s", err) + return makeGenericMgmtError(errors.New(string(b)), &req, resp) + } + + var bodyErr error + var firstErr string + for _, err := range mgrErr.Errors { + firstErr = strings.ToLower(err) + break + } + + if strings.Contains(firstErr, "bucket with given name already exists") { + bodyErr = ErrBucketExists + } else { + bodyErr = errors.New(firstErr) + } + + return makeGenericMgmtError(bodyErr, &req, resp) +} + +func (vm 
*ViewIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) { + resp, err := vm.mgmtProvider.executeMgmtRequest(req) + if err != nil { + return nil, err + } + + return resp, nil +} + +// GetDesignDocumentOptions is the set of options available to the ViewIndexManager GetDesignDocument operation. +type GetDesignDocumentOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +func (vm *ViewIndexManager) ddocName(name string, namespace DesignDocumentNamespace) string { + if namespace == DesignDocumentNamespaceProduction { + if strings.HasPrefix(name, "dev_") { + name = strings.TrimLeft(name, "dev_") + } + } else { + if !strings.HasPrefix(name, "dev_") { + name = "dev_" + name + } + } + + return name +} + +// GetDesignDocument retrieves a single design document for the given bucket. +func (vm *ViewIndexManager) GetDesignDocument(name string, namespace DesignDocumentNamespace, opts *GetDesignDocumentOptions) (*DesignDocument, error) { + if opts == nil { + opts = &GetDesignDocumentOptions{} + } + + span := vm.tracer.StartSpan("GetDesignDocument", nil).SetTag("couchbase.service", "view") + defer span.Finish() + + return vm.getDesignDocument(span.Context(), name, namespace, time.Now(), opts) +} + +func (vm *ViewIndexManager) getDesignDocument(tracectx requestSpanContext, name string, namespace DesignDocumentNamespace, + startTime time.Time, opts *GetDesignDocumentOptions) (*DesignDocument, error) { + + name = vm.ddocName(name, namespace) + + req := mgmtRequest{ + Service: ServiceTypeViews, + Path: fmt.Sprintf("/_design/%s", name), + Method: "GET", + IsIdempotent: true, + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + parentSpan: tracectx, + } + resp, err := vm.doMgmtRequest(req) + if err != nil { + return nil, err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + vwErr := vm.tryParseErrorMessage(req, resp) + if vwErr != nil { + return nil, vwErr + } + + return nil, 
makeGenericMgmtError(errors.New("failed to get design document"), &req, resp) + } + + var ddocData jsonDesignDocument + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&ddocData) + if err != nil { + return nil, err + } + + ddocName := strings.TrimPrefix(name, "dev_") + + var ddoc DesignDocument + err = ddoc.fromData(ddocData, ddocName) + if err != nil { + return nil, err + } + + return &ddoc, nil +} + +// GetAllDesignDocumentsOptions is the set of options available to the ViewIndexManager GetAllDesignDocuments operation. +type GetAllDesignDocumentsOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllDesignDocuments will retrieve all design documents for the given bucket. +func (vm *ViewIndexManager) GetAllDesignDocuments(namespace DesignDocumentNamespace, opts *GetAllDesignDocumentsOptions) ([]DesignDocument, error) { + if opts == nil { + opts = &GetAllDesignDocumentsOptions{} + } + + span := vm.tracer.StartSpan("GetAllDesignDocuments", nil).SetTag("couchbase.service", "view") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s/ddocs", vm.bucketName), + Method: "GET", + IsIdempotent: true, + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: span.Context(), + } + resp, err := vm.doMgmtRequest(req) + if err != nil { + return nil, err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + vwErr := vm.tryParseErrorMessage(req, resp) + if vwErr != nil { + return nil, vwErr + } + + return nil, makeGenericMgmtError(errors.New("failed to get design documents"), &req, resp) + } + + var ddocsResp struct { + Rows []struct { + Doc struct { + Meta struct { + ID string `json:"id"` + } + JSON jsonDesignDocument `json:"json"` + } `json:"doc"` + } `json:"rows"` + } + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&ddocsResp) + if err != nil { + return nil, err + } + + ddocs := make([]DesignDocument, 
len(ddocsResp.Rows)) + for ddocIdx, ddocData := range ddocsResp.Rows { + ddocName := strings.TrimPrefix(ddocData.Doc.Meta.ID[8:], "dev_") + + err := ddocs[ddocIdx].fromData(ddocData.Doc.JSON, ddocName) + if err != nil { + return nil, err + } + } + + return ddocs, nil +} + +// UpsertDesignDocumentOptions is the set of options available to the ViewIndexManager UpsertDesignDocument operation. +type UpsertDesignDocumentOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// UpsertDesignDocument will insert a design document to the given bucket, or update +// an existing design document with the same name. +func (vm *ViewIndexManager) UpsertDesignDocument(ddoc DesignDocument, namespace DesignDocumentNamespace, opts *UpsertDesignDocumentOptions) error { + if opts == nil { + opts = &UpsertDesignDocumentOptions{} + } + + span := vm.tracer.StartSpan("UpsertDesignDocument", nil).SetTag("couchbase.service", "view") + defer span.Finish() + + return vm.upsertDesignDocument(span.Context(), ddoc, namespace, time.Now(), opts) +} + +func (vm *ViewIndexManager) upsertDesignDocument( + tracectx requestSpanContext, + ddoc DesignDocument, + namespace DesignDocumentNamespace, + startTime time.Time, + opts *UpsertDesignDocumentOptions, +) error { + ddocData, ddocName, err := ddoc.toData() + if err != nil { + return err + } + + espan := vm.tracer.StartSpan("encode", tracectx) + data, err := json.Marshal(&ddocData) + espan.Finish() + if err != nil { + return err + } + + ddocName = vm.ddocName(ddocName, namespace) + + req := mgmtRequest{ + Service: ServiceTypeViews, + Path: fmt.Sprintf("/_design/%s", ddocName), + Method: "PUT", + Body: data, + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: tracectx, + } + resp, err := vm.doMgmtRequest(req) + if err != nil { + return err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 201 { + vwErr := vm.tryParseErrorMessage(req, resp) + if vwErr != nil { + return vwErr + } + + return 
makeGenericMgmtError(errors.New("failed to upsert design document"), &req, resp) + } + + return nil +} + +// DropDesignDocumentOptions is the set of options available to the ViewIndexManager Upsert operation. +type DropDesignDocumentOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropDesignDocument will remove a design document from the given bucket. +func (vm *ViewIndexManager) DropDesignDocument(name string, namespace DesignDocumentNamespace, opts *DropDesignDocumentOptions) error { + if opts == nil { + opts = &DropDesignDocumentOptions{} + } + + span := vm.tracer.StartSpan("DropDesignDocument", nil).SetTag("couchbase.service", "view") + defer span.Finish() + + return vm.dropDesignDocument(span.Context(), name, namespace, time.Now(), opts) +} + +func (vm *ViewIndexManager) dropDesignDocument(tracectx requestSpanContext, name string, namespace DesignDocumentNamespace, + startTime time.Time, opts *DropDesignDocumentOptions) error { + + name = vm.ddocName(name, namespace) + + req := mgmtRequest{ + Service: ServiceTypeViews, + Path: fmt.Sprintf("/_design/%s", name), + Method: "DELETE", + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: tracectx, + } + resp, err := vm.doMgmtRequest(req) + if err != nil { + return err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + vwErr := vm.tryParseErrorMessage(req, resp) + if vwErr != nil { + return vwErr + } + + return makeGenericMgmtError(errors.New("failed to drop design document"), &req, resp) + } + + return nil +} + +// PublishDesignDocumentOptions is the set of options available to the ViewIndexManager PublishDesignDocument operation. +type PublishDesignDocumentOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// PublishDesignDocument publishes a design document to the given bucket. 
+func (vm *ViewIndexManager) PublishDesignDocument(name string, opts *PublishDesignDocumentOptions) error { + startTime := time.Now() + if opts == nil { + opts = &PublishDesignDocumentOptions{} + } + + span := vm.tracer.StartSpan("PublishDesignDocument", nil). + SetTag("couchbase.service", "view") + defer span.Finish() + + devdoc, err := vm.getDesignDocument( + span.Context(), + name, + DesignDocumentNamespaceDevelopment, + startTime, + &GetDesignDocumentOptions{ + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + }) + if err != nil { + return err + } + + err = vm.upsertDesignDocument( + span.Context(), + *devdoc, + DesignDocumentNamespaceProduction, + startTime, + &UpsertDesignDocumentOptions{ + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + }) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/bucket_viewquery.go b/vendor/github.com/couchbase/gocb/v2/bucket_viewquery.go new file mode 100644 index 000000000000..74aedabbf893 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/bucket_viewquery.go @@ -0,0 +1,206 @@ +package gocb + +import ( + "encoding/json" + "net/url" + "strings" + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" + "github.com/pkg/errors" +) + +type jsonViewResponse struct { + TotalRows uint64 `json:"total_rows,omitempty"` + DebugInfo interface{} `json:"debug_info,omitempty"` +} + +type jsonViewRow struct { + ID string `json:"id"` + Key json.RawMessage `json:"key"` + Value json.RawMessage `json:"value"` +} + +// ViewMetaData provides access to the meta-data properties of a view query result. +type ViewMetaData struct { + TotalRows uint64 + Debug interface{} +} + +func (meta *ViewMetaData) fromData(data jsonViewResponse) error { + meta.TotalRows = data.TotalRows + meta.Debug = data.DebugInfo + + return nil +} + +// ViewRow represents a single row returned from a view query. 
+type ViewRow struct { + ID string + keyBytes []byte + valueBytes []byte +} + +// Key returns the key associated with this view row. +func (vr *ViewRow) Key(valuePtr interface{}) error { + return json.Unmarshal(vr.keyBytes, valuePtr) +} + +// Value returns the value associated with this view row. +func (vr *ViewRow) Value(valuePtr interface{}) error { + return json.Unmarshal(vr.valueBytes, valuePtr) +} + +type viewRowReader interface { + NextRow() []byte + Err() error + MetaData() ([]byte, error) + Close() error +} + +// ViewResult implements an iterator interface which can be used to iterate over the rows of the query results. +type ViewResult struct { + reader viewRowReader + + currentRow ViewRow +} + +func newViewResult(reader viewRowReader) *ViewResult { + return &ViewResult{ + reader: reader, + } +} + +// Next assigns the next result from the results into the value pointer, returning whether the read was successful. +func (r *ViewResult) Next() bool { + rowBytes := r.reader.NextRow() + if rowBytes == nil { + return false + } + + r.currentRow = ViewRow{} + + var rowData jsonViewRow + if err := json.Unmarshal(rowBytes, &rowData); err == nil { + r.currentRow.ID = rowData.ID + r.currentRow.keyBytes = rowData.Key + r.currentRow.valueBytes = rowData.Value + } + + return true +} + +// Row returns the contents of the current row. +func (r *ViewResult) Row() ViewRow { + return r.currentRow +} + +// Err returns any errors that have occurred on the stream +func (r *ViewResult) Err() error { + return r.reader.Err() +} + +// Close marks the results as closed, returning any errors that occurred during reading the results. +func (r *ViewResult) Close() error { + return r.reader.Close() +} + +// MetaData returns any meta-data that was available from this query. Note that +// the meta-data will only be available once the object has been closed (either +// implicitly or explicitly). 
+func (r *ViewResult) MetaData() (*ViewMetaData, error) { + metaDataBytes, err := r.reader.MetaData() + if err != nil { + return nil, err + } + + var jsonResp jsonViewResponse + err = json.Unmarshal(metaDataBytes, &jsonResp) + if err != nil { + return nil, err + } + + var metaData ViewMetaData + err = metaData.fromData(jsonResp) + if err != nil { + return nil, err + } + + return &metaData, nil +} + +// ViewQuery performs a view query and returns a list of rows or an error. +func (b *Bucket) ViewQuery(designDoc string, viewName string, opts *ViewOptions) (*ViewResult, error) { + if opts == nil { + opts = &ViewOptions{} + } + + span := b.tracer.StartSpan("ViewQuery", opts.parentSpan). + SetTag("couchbase.service", "view") + defer span.Finish() + + designDoc = b.maybePrefixDevDocument(opts.Namespace, designDoc) + + timeout := opts.Timeout + if timeout == 0 { + timeout = b.timeoutsConfig.ViewTimeout + } + deadline := time.Now().Add(timeout) + + retryWrapper := b.retryStrategyWrapper + if opts.RetryStrategy != nil { + retryWrapper = newRetryStrategyWrapper(opts.RetryStrategy) + } + + urlValues, err := opts.toURLValues() + if err != nil { + return nil, errors.Wrap(err, "could not parse query options") + } + + return b.execViewQuery(span.Context(), "_view", designDoc, viewName, *urlValues, deadline, retryWrapper) +} + +func (b *Bucket) execViewQuery( + span requestSpanContext, + viewType, ddoc, viewName string, + options url.Values, + deadline time.Time, + wrapper *retryStrategyWrapper, +) (*ViewResult, error) { + provider, err := b.connectionManager.getViewProvider() + if err != nil { + return nil, ViewError{ + InnerError: wrapError(err, "failed to get query provider"), + DesignDocumentName: ddoc, + ViewName: viewName, + } + } + + res, err := provider.ViewQuery(gocbcore.ViewQueryOptions{ + DesignDocumentName: ddoc, + ViewType: viewType, + ViewName: viewName, + Options: options, + RetryStrategy: wrapper, + Deadline: deadline, + TraceContext: span, + }) + if err != nil { + 
return nil, maybeEnhanceViewError(err) + } + + return newViewResult(res), nil +} + +func (b *Bucket) maybePrefixDevDocument(namespace DesignDocumentNamespace, ddoc string) string { + designDoc := ddoc + if namespace == DesignDocumentNamespaceProduction { + designDoc = strings.TrimPrefix(ddoc, "dev_") + } else { + if !strings.HasPrefix(ddoc, "dev_") { + designDoc = "dev_" + ddoc + } + } + + return designDoc +} diff --git a/vendor/github.com/couchbase/gocb/v2/circuitbreaker.go b/vendor/github.com/couchbase/gocb/v2/circuitbreaker.go new file mode 100644 index 000000000000..71d4be33040c --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/circuitbreaker.go @@ -0,0 +1,18 @@ +package gocb + +import "time" + +// CircuitBreakerCallback is the callback used by the circuit breaker to determine if an error should count toward +// the circuit breaker failure count. +type CircuitBreakerCallback func(error) bool + +// CircuitBreakerConfig are the settings for configuring circuit breakers. +type CircuitBreakerConfig struct { + Disabled bool + VolumeThreshold int64 + ErrorThresholdPercentage float64 + SleepWindow time.Duration + RollingWindow time.Duration + CompletionCallback CircuitBreakerCallback + CanaryTimeout time.Duration +} diff --git a/vendor/github.com/couchbase/gocb/v2/client.go b/vendor/github.com/couchbase/gocb/v2/client.go new file mode 100644 index 000000000000..45c1fb47ae1b --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/client.go @@ -0,0 +1,231 @@ +package gocb + +import ( + "crypto/x509" + "sync" + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" + "github.com/pkg/errors" +) + +type connectionManager interface { + connect() error + openBucket(bucketName string) error + buildConfig(cluster *Cluster) error + getKvProvider(bucketName string) (kvProvider, error) + getViewProvider() (viewProvider, error) + getQueryProvider() (queryProvider, error) + getAnalyticsProvider() (analyticsProvider, error) + getSearchProvider() (searchProvider, error) + 
getHTTPProvider() (httpProvider, error) + getDiagnosticsProvider(bucketName string) (diagnosticsProvider, error) + getWaitUntilReadyProvider(bucketName string) (waitUntilReadyProvider, error) + connection(bucketName string) (*gocbcore.Agent, error) + close() error +} + +type stdConnectionMgr struct { + lock sync.Mutex + agentgroup *gocbcore.AgentGroup + config *gocbcore.AgentGroupConfig +} + +func newConnectionMgr() *stdConnectionMgr { + client := &stdConnectionMgr{} + return client +} + +func (c *stdConnectionMgr) buildConfig(cluster *Cluster) error { + c.lock.Lock() + defer c.lock.Unlock() + + breakerCfg := cluster.circuitBreakerConfig + + var completionCallback func(err error) bool + if breakerCfg.CompletionCallback != nil { + completionCallback = func(err error) bool { + wrappedErr := maybeEnhanceKVErr(err, "", "", "", "") + return breakerCfg.CompletionCallback(wrappedErr) + } + } + + var tlsRootCAProvider func() *x509.CertPool + if cluster.internalConfig.TLSRootCAProvider == nil { + tlsRootCAProvider = func() *x509.CertPool { + if cluster.securityConfig.TLSSkipVerify { + return nil + } + + return cluster.securityConfig.TLSRootCAs + } + } else { + tlsRootCAProvider = cluster.internalConfig.TLSRootCAProvider + } + + config := &gocbcore.AgentGroupConfig{ + AgentConfig: gocbcore.AgentConfig{ + UserAgent: Identifier(), + TLSRootCAProvider: tlsRootCAProvider, + ConnectTimeout: cluster.timeoutsConfig.ConnectTimeout, + UseMutationTokens: cluster.useMutationTokens, + KVConnectTimeout: 7000 * time.Millisecond, + UseDurations: cluster.useServerDurations, + UseCollections: true, + UseZombieLogger: cluster.orphanLoggerEnabled, + ZombieLoggerInterval: cluster.orphanLoggerInterval, + ZombieLoggerSampleSize: int(cluster.orphanLoggerSampleSize), + NoRootTraceSpans: true, + Tracer: &requestTracerWrapper{cluster.tracer}, + CircuitBreakerConfig: gocbcore.CircuitBreakerConfig{ + Enabled: !breakerCfg.Disabled, + VolumeThreshold: breakerCfg.VolumeThreshold, + 
ErrorThresholdPercentage: breakerCfg.ErrorThresholdPercentage, + SleepWindow: breakerCfg.SleepWindow, + RollingWindow: breakerCfg.RollingWindow, + CanaryTimeout: breakerCfg.CanaryTimeout, + CompletionCallback: completionCallback, + }, + DefaultRetryStrategy: cluster.retryStrategyWrapper, + }, + } + + err := config.FromConnStr(cluster.connSpec().String()) + if err != nil { + return err + } + + config.Auth = &coreAuthWrapper{ + auth: cluster.authenticator(), + } + + c.config = config + return nil +} + +func (c *stdConnectionMgr) connect() error { + c.lock.Lock() + defer c.lock.Unlock() + var err error + c.agentgroup, err = gocbcore.CreateAgentGroup(c.config) + if err != nil { + return maybeEnhanceKVErr(err, "", "", "", "") + } + + return nil +} + +func (c *stdConnectionMgr) openBucket(bucketName string) error { + if c.agentgroup == nil { + return errors.New("cluster not yet connected") + } + + return c.agentgroup.OpenBucket(bucketName) +} + +func (c *stdConnectionMgr) getKvProvider(bucketName string) (kvProvider, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + agent := c.agentgroup.GetAgent(bucketName) + if agent == nil { + return nil, errors.New("bucket not yet connected") + } + return agent, nil +} + +func (c *stdConnectionMgr) getViewProvider() (viewProvider, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + + return &viewProviderWrapper{provider: c.agentgroup}, nil +} + +func (c *stdConnectionMgr) getQueryProvider() (queryProvider, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + + return &queryProviderWrapper{provider: c.agentgroup}, nil +} + +func (c *stdConnectionMgr) getAnalyticsProvider() (analyticsProvider, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + + return &analyticsProviderWrapper{provider: c.agentgroup}, nil +} + +func (c *stdConnectionMgr) getSearchProvider() 
(searchProvider, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + + return &searchProviderWrapper{provider: c.agentgroup}, nil +} + +func (c *stdConnectionMgr) getHTTPProvider() (httpProvider, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + + return &httpProviderWrapper{provider: c.agentgroup}, nil +} + +func (c *stdConnectionMgr) getDiagnosticsProvider(bucketName string) (diagnosticsProvider, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + + if bucketName == "" { + return &diagnosticsProviderWrapper{provider: c.agentgroup}, nil + } + + agent := c.agentgroup.GetAgent(bucketName) + if agent == nil { + return nil, errors.New("bucket not yet connected") + } + + return &diagnosticsProviderWrapper{provider: agent}, nil +} + +func (c *stdConnectionMgr) getWaitUntilReadyProvider(bucketName string) (waitUntilReadyProvider, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + + if bucketName == "" { + return &waitUntilReadyProviderWrapper{provider: c.agentgroup}, nil + } + + agent := c.agentgroup.GetAgent(bucketName) + if agent == nil { + return nil, errors.New("provider not yet connected") + } + + return &waitUntilReadyProviderWrapper{provider: agent}, nil +} + +func (c *stdConnectionMgr) connection(bucketName string) (*gocbcore.Agent, error) { + if c.agentgroup == nil { + return nil, errors.New("cluster not yet connected") + } + + agent := c.agentgroup.GetAgent(bucketName) + if agent == nil { + return nil, errors.New("bucket not yet connected") + } + return agent, nil +} + +func (c *stdConnectionMgr) close() error { + c.lock.Lock() + if c.agentgroup == nil { + c.lock.Unlock() + return errors.New("cluster not yet connected") + } + defer c.lock.Unlock() + return c.agentgroup.Close() +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster.go 
b/vendor/github.com/couchbase/gocb/v2/cluster.go new file mode 100644 index 000000000000..c0d3ce2154c0 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster.go @@ -0,0 +1,474 @@ +package gocb + +import ( + "crypto/x509" + "fmt" + "strconv" + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" + gocbconnstr "github.com/couchbase/gocbcore/v9/connstr" + "github.com/pkg/errors" +) + +// Cluster represents a connection to a specific Couchbase cluster. +type Cluster struct { + cSpec gocbconnstr.ConnSpec + auth Authenticator + + connectionManager connectionManager + + useServerDurations bool + useMutationTokens bool + + timeoutsConfig TimeoutsConfig + + transcoder Transcoder + retryStrategyWrapper *retryStrategyWrapper + + orphanLoggerEnabled bool + orphanLoggerInterval time.Duration + orphanLoggerSampleSize uint32 + + tracer requestTracer + + circuitBreakerConfig CircuitBreakerConfig + securityConfig SecurityConfig + internalConfig InternalConfig +} + +// IoConfig specifies IO related configuration options. +type IoConfig struct { + DisableMutationTokens bool + DisableServerDurations bool +} + +// TimeoutsConfig specifies options for various operation timeouts. +type TimeoutsConfig struct { + ConnectTimeout time.Duration + KVTimeout time.Duration + // Volatile: This option is subject to change at any time. + KVDurableTimeout time.Duration + ViewTimeout time.Duration + QueryTimeout time.Duration + AnalyticsTimeout time.Duration + SearchTimeout time.Duration + ManagementTimeout time.Duration +} + +// OrphanReporterConfig specifies options for controlling the orphan +// reporter which records when the SDK receives responses for requests +// that are no longer in the system (usually due to being timed out). +type OrphanReporterConfig struct { + Disabled bool + ReportInterval time.Duration + SampleSize uint32 +} + +// SecurityConfig specifies options for controlling security related +// items such as TLS root certificates and verification skipping. 
+type SecurityConfig struct { + TLSRootCAs *x509.CertPool + TLSSkipVerify bool +} + +// InternalConfig specifies options for controlling various internal +// items. +// Internal: This should never be used and is not supported. +type InternalConfig struct { + TLSRootCAProvider func() *x509.CertPool +} + +// ClusterOptions is the set of options available for creating a Cluster. +type ClusterOptions struct { + // Authenticator specifies the authenticator to use with the cluster. + Authenticator Authenticator + + // Username & Password specifies the cluster username and password to + // authenticate with. This is equivalent to passing PasswordAuthenticator + // as the Authenticator parameter with the same values. + Username string + Password string + + // Timeouts specifies various operation timeouts. + TimeoutsConfig TimeoutsConfig + + // Transcoder is used for trancoding data used in KV operations. + Transcoder Transcoder + + // RetryStrategy is used to automatically retry operations if they fail. + RetryStrategy RetryStrategy + + // Tracer specifies the tracer to use for requests. + // VOLATILE: This API is subject to change at any time. + Tracer requestTracer + + // OrphanReporterConfig specifies options for the orphan reporter. + OrphanReporterConfig OrphanReporterConfig + + // CircuitBreakerConfig specifies options for the circuit breakers. + CircuitBreakerConfig CircuitBreakerConfig + + // IoConfig specifies IO related configuration options. + IoConfig IoConfig + + // SecurityConfig specifies security related configuration options. + SecurityConfig SecurityConfig + + // Internal: This should never be used and is not supported. + InternalConfig InternalConfig +} + +// ClusterCloseOptions is the set of options available when +// disconnecting from a Cluster. 
+type ClusterCloseOptions struct { +} + +func clusterFromOptions(opts ClusterOptions) *Cluster { + if opts.Authenticator == nil { + opts.Authenticator = PasswordAuthenticator{ + Username: opts.Username, + Password: opts.Password, + } + } + + connectTimeout := 10000 * time.Millisecond + kvTimeout := 2500 * time.Millisecond + kvDurableTimeout := 10000 * time.Millisecond + viewTimeout := 75000 * time.Millisecond + queryTimeout := 75000 * time.Millisecond + analyticsTimeout := 75000 * time.Millisecond + searchTimeout := 75000 * time.Millisecond + managementTimeout := 75000 * time.Millisecond + if opts.TimeoutsConfig.ConnectTimeout > 0 { + connectTimeout = opts.TimeoutsConfig.ConnectTimeout + } + if opts.TimeoutsConfig.KVTimeout > 0 { + kvTimeout = opts.TimeoutsConfig.KVTimeout + } + if opts.TimeoutsConfig.KVDurableTimeout > 0 { + kvDurableTimeout = opts.TimeoutsConfig.KVDurableTimeout + } + if opts.TimeoutsConfig.ViewTimeout > 0 { + viewTimeout = opts.TimeoutsConfig.ViewTimeout + } + if opts.TimeoutsConfig.QueryTimeout > 0 { + queryTimeout = opts.TimeoutsConfig.QueryTimeout + } + if opts.TimeoutsConfig.AnalyticsTimeout > 0 { + analyticsTimeout = opts.TimeoutsConfig.AnalyticsTimeout + } + if opts.TimeoutsConfig.SearchTimeout > 0 { + searchTimeout = opts.TimeoutsConfig.SearchTimeout + } + if opts.TimeoutsConfig.ManagementTimeout > 0 { + managementTimeout = opts.TimeoutsConfig.ManagementTimeout + } + if opts.Transcoder == nil { + opts.Transcoder = NewJSONTranscoder() + } + if opts.RetryStrategy == nil { + opts.RetryStrategy = NewBestEffortRetryStrategy(nil) + } + + useMutationTokens := true + useServerDurations := true + if opts.IoConfig.DisableMutationTokens { + useMutationTokens = false + } + if opts.IoConfig.DisableServerDurations { + useServerDurations = false + } + + var initialTracer requestTracer + if opts.Tracer != nil { + initialTracer = opts.Tracer + } else { + initialTracer = newThresholdLoggingTracer(nil) + } + tracerAddRef(initialTracer) + + return &Cluster{ 
+ auth: opts.Authenticator, + timeoutsConfig: TimeoutsConfig{ + ConnectTimeout: connectTimeout, + QueryTimeout: queryTimeout, + AnalyticsTimeout: analyticsTimeout, + SearchTimeout: searchTimeout, + ViewTimeout: viewTimeout, + KVTimeout: kvTimeout, + KVDurableTimeout: kvDurableTimeout, + ManagementTimeout: managementTimeout, + }, + transcoder: opts.Transcoder, + useMutationTokens: useMutationTokens, + retryStrategyWrapper: newRetryStrategyWrapper(opts.RetryStrategy), + orphanLoggerEnabled: !opts.OrphanReporterConfig.Disabled, + orphanLoggerInterval: opts.OrphanReporterConfig.ReportInterval, + orphanLoggerSampleSize: opts.OrphanReporterConfig.SampleSize, + useServerDurations: useServerDurations, + tracer: initialTracer, + circuitBreakerConfig: opts.CircuitBreakerConfig, + securityConfig: opts.SecurityConfig, + internalConfig: opts.InternalConfig, + } +} + +// Connect creates and returns a Cluster instance created using the +// provided options and a connection string. +func Connect(connStr string, opts ClusterOptions) (*Cluster, error) { + connSpec, err := gocbconnstr.Parse(connStr) + if err != nil { + return nil, err + } + + if connSpec.Scheme == "http" { + return nil, errors.New("http scheme is not supported, use couchbase or couchbases instead") + } + + cluster := clusterFromOptions(opts) + cluster.cSpec = connSpec + + err = cluster.parseExtraConnStrOptions(connSpec) + if err != nil { + return nil, err + } + + cli := newConnectionMgr() + err = cli.buildConfig(cluster) + if err != nil { + return nil, err + } + + err = cli.connect() + if err != nil { + return nil, err + } + cluster.connectionManager = cli + + return cluster, nil +} + +func (c *Cluster) parseExtraConnStrOptions(spec gocbconnstr.ConnSpec) error { + fetchOption := func(name string) (string, bool) { + optValue := spec.Options[name] + if len(optValue) == 0 { + return "", false + } + return optValue[len(optValue)-1], true + } + + if valStr, ok := fetchOption("query_timeout"); ok { + val, err := 
strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("query_timeout option must be a number") + } + c.timeoutsConfig.QueryTimeout = time.Duration(val) * time.Millisecond + } + + if valStr, ok := fetchOption("analytics_timeout"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("analytics_timeout option must be a number") + } + c.timeoutsConfig.AnalyticsTimeout = time.Duration(val) * time.Millisecond + } + + if valStr, ok := fetchOption("search_timeout"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("search_timeout option must be a number") + } + c.timeoutsConfig.SearchTimeout = time.Duration(val) * time.Millisecond + } + + if valStr, ok := fetchOption("view_timeout"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("view_timeout option must be a number") + } + c.timeoutsConfig.ViewTimeout = time.Duration(val) * time.Millisecond + } + + return nil +} + +// Bucket connects the cluster to server(s) and returns a new Bucket instance. +func (c *Cluster) Bucket(bucketName string) *Bucket { + b := newBucket(c, bucketName) + err := c.connectionManager.openBucket(bucketName) + if err != nil { + b.setBootstrapError(err) + } + + return b +} + +func (c *Cluster) authenticator() Authenticator { + return c.auth +} + +func (c *Cluster) connSpec() gocbconnstr.ConnSpec { + return c.cSpec +} + +// WaitUntilReadyOptions is the set of options available to the WaitUntilReady operations. +type WaitUntilReadyOptions struct { + DesiredState ClusterState + ServiceTypes []ServiceType +} + +// WaitUntilReady will wait for the cluster object to be ready for use. +// At present this will wait until memd connections have been established with the server and are ready +// to be used before performing a ping against the specified services which also +// exist in the cluster map. 
+// If no services are specified then ServiceTypeManagement, ServiceTypeQuery, ServiceTypeSearch, ServiceTypeAnalytics +// will be pinged. +// Valid service types are: ServiceTypeManagement, ServiceTypeQuery, ServiceTypeSearch, ServiceTypeAnalytics. +func (c *Cluster) WaitUntilReady(timeout time.Duration, opts *WaitUntilReadyOptions) error { + if opts == nil { + opts = &WaitUntilReadyOptions{} + } + + cli := c.connectionManager + if cli == nil { + return errors.New("cluster is not connected") + } + + provider, err := cli.getWaitUntilReadyProvider("") + if err != nil { + return err + } + + desiredState := opts.DesiredState + if desiredState == 0 { + desiredState = ClusterStateOnline + } + + services := opts.ServiceTypes + gocbcoreServices := make([]gocbcore.ServiceType, len(services)) + for i, svc := range services { + gocbcoreServices[i] = gocbcore.ServiceType(svc) + } + + err = provider.WaitUntilReady( + time.Now().Add(timeout), + gocbcore.WaitUntilReadyOptions{ + DesiredState: gocbcore.ClusterState(desiredState), + ServiceTypes: gocbcoreServices, + }, + ) + if err != nil { + return err + } + + return nil +} + +// Close shuts down all buckets in this cluster and invalidates any references this cluster has. 
+func (c *Cluster) Close(opts *ClusterCloseOptions) error { + var overallErr error + + if c.connectionManager != nil { + err := c.connectionManager.close() + if err != nil { + logWarnf("Failed to close cluster connectionManager in cluster close: %s", err) + overallErr = err + } + } + + if c.tracer != nil { + tracerDecRef(c.tracer) + c.tracer = nil + } + + return overallErr +} + +func (c *Cluster) getDiagnosticsProvider() (diagnosticsProvider, error) { + provider, err := c.connectionManager.getDiagnosticsProvider("") + if err != nil { + return nil, err + } + + return provider, nil +} + +func (c *Cluster) getQueryProvider() (queryProvider, error) { + provider, err := c.connectionManager.getQueryProvider() + if err != nil { + return nil, err + } + + return provider, nil +} + +func (c *Cluster) getAnalyticsProvider() (analyticsProvider, error) { + provider, err := c.connectionManager.getAnalyticsProvider() + if err != nil { + return nil, err + } + + return provider, nil +} + +func (c *Cluster) getSearchProvider() (searchProvider, error) { + provider, err := c.connectionManager.getSearchProvider() + if err != nil { + return nil, err + } + + return provider, nil +} + +func (c *Cluster) getHTTPProvider() (httpProvider, error) { + provider, err := c.connectionManager.getHTTPProvider() + if err != nil { + return nil, err + } + + return provider, nil +} + +// Users returns a UserManager for managing users. +func (c *Cluster) Users() *UserManager { + return &UserManager{ + provider: c, + tracer: c.tracer, + } +} + +// Buckets returns a BucketManager for managing buckets. +func (c *Cluster) Buckets() *BucketManager { + return &BucketManager{ + provider: c, + tracer: c.tracer, + } +} + +// AnalyticsIndexes returns an AnalyticsIndexManager for managing analytics indexes. 
+func (c *Cluster) AnalyticsIndexes() *AnalyticsIndexManager { + return &AnalyticsIndexManager{ + aProvider: c, + mgmtProvider: c, + globalTimeout: c.timeoutsConfig.ManagementTimeout, + tracer: c.tracer, + } +} + +// QueryIndexes returns a QueryIndexManager for managing query indexes. +func (c *Cluster) QueryIndexes() *QueryIndexManager { + return &QueryIndexManager{ + provider: c, + globalTimeout: c.timeoutsConfig.ManagementTimeout, + tracer: c.tracer, + } +} + +// SearchIndexes returns a SearchIndexManager for managing search indexes. +func (c *Cluster) SearchIndexes() *SearchIndexManager { + return &SearchIndexManager{ + mgmtProvider: c, + tracer: c.tracer, + } +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_analyticsindexes.go b/vendor/github.com/couchbase/gocb/v2/cluster_analyticsindexes.go new file mode 100644 index 000000000000..bcc46ed58baf --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_analyticsindexes.go @@ -0,0 +1,597 @@ +package gocb + +import ( + "encoding/json" + "fmt" + "strings" + "time" +) + +// AnalyticsIndexManager provides methods for performing Couchbase Analytics index management. 
+type AnalyticsIndexManager struct { + aProvider analyticsIndexQueryProvider + mgmtProvider mgmtProvider + + globalTimeout time.Duration + tracer requestTracer +} + +type analyticsIndexQueryProvider interface { + AnalyticsQuery(statement string, opts *AnalyticsOptions) (*AnalyticsResult, error) +} + +func (am *AnalyticsIndexManager) doAnalyticsQuery(q string, opts *AnalyticsOptions) ([][]byte, error) { + if opts.Timeout == 0 { + opts.Timeout = am.globalTimeout + } + + result, err := am.aProvider.AnalyticsQuery(q, opts) + if err != nil { + return nil, err + } + + var rows [][]byte + for result.Next() { + var row json.RawMessage + err := result.Row(&row) + if err != nil { + logWarnf("management operation failed to read row: %s", err) + } else { + rows = append(rows, row) + } + } + err = result.Err() + if err != nil { + return nil, err + } + + return rows, nil +} + +func (am *AnalyticsIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) { + resp, err := am.mgmtProvider.executeMgmtRequest(req) + if err != nil { + return nil, err + } + + return resp, nil +} + +type jsonAnalyticsDataset struct { + DatasetName string `json:"DatasetName"` + DataverseName string `json:"DataverseName"` + LinkName string `json:"LinkName"` + BucketName string `json:"BucketName"` +} + +type jsonAnalyticsIndex struct { + IndexName string `json:"IndexName"` + DatasetName string `json:"DatasetName"` + DataverseName string `json:"DataverseName"` + IsPrimary bool `json:"IsPrimary"` +} + +// AnalyticsDataset contains information about an analytics dataset. +type AnalyticsDataset struct { + Name string + DataverseName string + LinkName string + BucketName string +} + +func (ad *AnalyticsDataset) fromData(data jsonAnalyticsDataset) error { + ad.Name = data.DatasetName + ad.DataverseName = data.DataverseName + ad.LinkName = data.LinkName + ad.BucketName = data.BucketName + + return nil +} + +// AnalyticsIndex contains information about an analytics index. 
+type AnalyticsIndex struct {
+	Name          string
+	DatasetName   string
+	DataverseName string
+	IsPrimary     bool
+}
+
+func (ai *AnalyticsIndex) fromData(data jsonAnalyticsIndex) error {
+	ai.Name = data.IndexName
+	ai.DatasetName = data.DatasetName
+	ai.DataverseName = data.DataverseName
+	ai.IsPrimary = data.IsPrimary
+
+	return nil
+}
+
+// CreateAnalyticsDataverseOptions is the set of options available to the AnalyticsManager CreateDataverse operation.
+type CreateAnalyticsDataverseOptions struct {
+	IgnoreIfExists bool
+
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// CreateDataverse creates a new analytics dataverse.
+func (am *AnalyticsIndexManager) CreateDataverse(dataverseName string, opts *CreateAnalyticsDataverseOptions) error {
+	if opts == nil {
+		opts = &CreateAnalyticsDataverseOptions{}
+	}
+
+	if dataverseName == "" {
+		return invalidArgumentsError{
+			message: "dataverse name cannot be empty",
+		}
+	}
+
+	span := am.tracer.StartSpan("CreateDataverse", nil).
+		SetTag("couchbase.service", "analytics")
+	defer span.Finish()
+
+	var ignoreStr string
+	if opts.IgnoreIfExists {
+		ignoreStr = "IF NOT EXISTS"
+	}
+
+	q := fmt.Sprintf("CREATE DATAVERSE `%s` %s", dataverseName, ignoreStr)
+	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
+		Timeout:       opts.Timeout,
+		RetryStrategy: opts.RetryStrategy,
+		parentSpan:    span,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DropAnalyticsDataverseOptions is the set of options available to the AnalyticsManager DropDataverse operation.
+type DropAnalyticsDataverseOptions struct {
+	IgnoreIfNotExists bool
+
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// DropDataverse drops an analytics dataverse.
+func (am *AnalyticsIndexManager) DropDataverse(dataverseName string, opts *DropAnalyticsDataverseOptions) error {
+	if opts == nil {
+		opts = &DropAnalyticsDataverseOptions{}
+	}
+
+	span := am.tracer.StartSpan("DropDataverse", nil).
+		SetTag("couchbase.service", "analytics")
+	defer span.Finish()
+
+	var ignoreStr string
+	if opts.IgnoreIfNotExists {
+		ignoreStr = "IF EXISTS"
+	}
+
+	q := fmt.Sprintf("DROP DATAVERSE `%s` %s", dataverseName, ignoreStr)
+	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
+		Timeout:       opts.Timeout,
+		RetryStrategy: opts.RetryStrategy,
+		parentSpan:    span,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// CreateAnalyticsDatasetOptions is the set of options available to the AnalyticsManager CreateDataset operation.
+type CreateAnalyticsDatasetOptions struct {
+	IgnoreIfExists bool
+	Condition      string
+	DataverseName  string
+
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// CreateDataset creates a new analytics dataset.
+func (am *AnalyticsIndexManager) CreateDataset(datasetName, bucketName string, opts *CreateAnalyticsDatasetOptions) error {
+	if opts == nil {
+		opts = &CreateAnalyticsDatasetOptions{}
+	}
+
+	if datasetName == "" {
+		return invalidArgumentsError{
+			message: "dataset name cannot be empty",
+		}
+	}
+
+	span := am.tracer.StartSpan("CreateDataset", nil).
+ SetTag("couchbase.service", "analytics") + defer span.Finish() + + var ignoreStr string + if opts.IgnoreIfExists { + ignoreStr = "IF NOT EXISTS" + } + + var where string + if opts.Condition != "" { + if !strings.HasPrefix(strings.ToUpper(opts.Condition), "WHERE") { + where = "WHERE " + } + where += opts.Condition + } + + if opts.DataverseName == "" { + datasetName = fmt.Sprintf("`%s`", datasetName) + } else { + datasetName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName) + } + + q := fmt.Sprintf("CREATE DATASET %s %s ON `%s` %s", ignoreStr, datasetName, bucketName, where) + _, err := am.doAnalyticsQuery(q, &AnalyticsOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: span, + }) + if err != nil { + return err + } + + return nil +} + +// DropAnalyticsDatasetOptions is the set of options available to the AnalyticsManager DropDataset operation. +type DropAnalyticsDatasetOptions struct { + IgnoreIfNotExists bool + DataverseName string + + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropDataset drops an analytics dataset. +func (am *AnalyticsIndexManager) DropDataset(datasetName string, opts *DropAnalyticsDatasetOptions) error { + if opts == nil { + opts = &DropAnalyticsDatasetOptions{} + } + + span := am.tracer.StartSpan("DropDataset", nil). 
+ SetTag("couchbase.service", "analytics") + defer span.Finish() + + var ignoreStr string + if opts.IgnoreIfNotExists { + ignoreStr = "IF EXISTS" + } + + if opts.DataverseName == "" { + datasetName = fmt.Sprintf("`%s`", datasetName) + } else { + datasetName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName) + } + + q := fmt.Sprintf("DROP DATASET %s %s", datasetName, ignoreStr) + _, err := am.doAnalyticsQuery(q, &AnalyticsOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: span, + }) + if err != nil { + return err + } + + return nil +} + +// GetAllAnalyticsDatasetsOptions is the set of options available to the AnalyticsManager GetAllDatasets operation. +type GetAllAnalyticsDatasetsOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllDatasets gets all analytics datasets. +func (am *AnalyticsIndexManager) GetAllDatasets(opts *GetAllAnalyticsDatasetsOptions) ([]AnalyticsDataset, error) { + if opts == nil { + opts = &GetAllAnalyticsDatasetsOptions{} + } + + span := am.tracer.StartSpan("GetAllDatasets", nil). + SetTag("couchbase.service", "analytics") + defer span.Finish() + + q := "SELECT d.* FROM Metadata.`Dataset` d WHERE d.DataverseName <> \"Metadata\"" + rows, err := am.doAnalyticsQuery(q, &AnalyticsOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: span, + }) + if err != nil { + return nil, err + } + + datasets := make([]AnalyticsDataset, len(rows)) + for rowIdx, row := range rows { + var datasetData jsonAnalyticsDataset + err := json.Unmarshal(row, &datasetData) + if err != nil { + return nil, err + } + + err = datasets[rowIdx].fromData(datasetData) + if err != nil { + return nil, err + } + } + + return datasets, nil +} + +// CreateAnalyticsIndexOptions is the set of options available to the AnalyticsManager CreateIndex operation. 
+type CreateAnalyticsIndexOptions struct {
+	IgnoreIfExists bool
+	DataverseName  string
+
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// CreateIndex creates a new analytics index.
+func (am *AnalyticsIndexManager) CreateIndex(datasetName, indexName string, fields map[string]string, opts *CreateAnalyticsIndexOptions) error {
+	if opts == nil {
+		opts = &CreateAnalyticsIndexOptions{}
+	}
+
+	if indexName == "" {
+		return invalidArgumentsError{
+			message: "index name cannot be empty",
+		}
+	}
+	if len(fields) == 0 {
+		return invalidArgumentsError{
+			message: "you must specify at least one field to index",
+		}
+	}
+
+	span := am.tracer.StartSpan("CreateIndex", nil).
+		SetTag("couchbase.service", "analytics")
+	defer span.Finish()
+
+	var ignoreStr string
+	if opts.IgnoreIfExists {
+		ignoreStr = "IF NOT EXISTS"
+	}
+
+	var indexFields []string
+	for name, typ := range fields {
+		indexFields = append(indexFields, name+":"+typ)
+	}
+
+	if opts.DataverseName == "" {
+		datasetName = fmt.Sprintf("`%s`", datasetName)
+	} else {
+		datasetName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName)
+	}
+
+	q := fmt.Sprintf("CREATE INDEX `%s` %s ON %s (%s)", indexName, ignoreStr, datasetName, strings.Join(indexFields, ","))
+	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
+		Timeout:       opts.Timeout,
+		RetryStrategy: opts.RetryStrategy,
+		parentSpan:    span,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DropAnalyticsIndexOptions is the set of options available to the AnalyticsManager DropIndex operation.
+type DropAnalyticsIndexOptions struct {
+	IgnoreIfNotExists bool
+	DataverseName     string
+
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// DropIndex drops an analytics index.
+func (am *AnalyticsIndexManager) DropIndex(datasetName, indexName string, opts *DropAnalyticsIndexOptions) error {
+	if opts == nil {
+		opts = &DropAnalyticsIndexOptions{}
+	}
+
+	span := am.tracer.StartSpan("DropIndex", nil).
+ SetTag("couchbase.service", "analytics") + defer span.Finish() + + var ignoreStr string + if opts.IgnoreIfNotExists { + ignoreStr = "IF EXISTS" + } + + if opts.DataverseName == "" { + datasetName = fmt.Sprintf("`%s`", datasetName) + } else { + datasetName = fmt.Sprintf("`%s`.`%s`", opts.DataverseName, datasetName) + } + + q := fmt.Sprintf("DROP INDEX %s.%s %s", datasetName, indexName, ignoreStr) + _, err := am.doAnalyticsQuery(q, &AnalyticsOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: span, + }) + if err != nil { + return err + } + + return nil +} + +// GetAllAnalyticsIndexesOptions is the set of options available to the AnalyticsManager GetAllIndexes operation. +type GetAllAnalyticsIndexesOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllIndexes gets all analytics indexes. +func (am *AnalyticsIndexManager) GetAllIndexes(opts *GetAllAnalyticsIndexesOptions) ([]AnalyticsIndex, error) { + if opts == nil { + opts = &GetAllAnalyticsIndexesOptions{} + } + + span := am.tracer.StartSpan("GetAllIndexes", nil). + SetTag("couchbase.service", "analytics") + defer span.Finish() + + q := "SELECT d.* FROM Metadata.`Index` d WHERE d.DataverseName <> \"Metadata\"" + rows, err := am.doAnalyticsQuery(q, &AnalyticsOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: span, + }) + if err != nil { + return nil, err + } + + indexes := make([]AnalyticsIndex, len(rows)) + for rowIdx, row := range rows { + var indexData jsonAnalyticsIndex + err := json.Unmarshal(row, &indexData) + if err != nil { + return nil, err + } + + err = indexes[rowIdx].fromData(indexData) + if err != nil { + return nil, err + } + } + + return indexes, nil +} + +// ConnectAnalyticsLinkOptions is the set of options available to the AnalyticsManager ConnectLink operation. 
+type ConnectAnalyticsLinkOptions struct {
+	LinkName string
+
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// ConnectLink connects an analytics link.
+func (am *AnalyticsIndexManager) ConnectLink(opts *ConnectAnalyticsLinkOptions) error {
+	if opts == nil {
+		opts = &ConnectAnalyticsLinkOptions{}
+	}
+
+	span := am.tracer.StartSpan("ConnectLink", nil).
+		SetTag("couchbase.service", "analytics")
+	defer span.Finish()
+
+	if opts.LinkName == "" {
+		opts.LinkName = "Local"
+	}
+
+	q := fmt.Sprintf("CONNECT LINK %s", opts.LinkName)
+	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
+		Timeout:       opts.Timeout,
+		RetryStrategy: opts.RetryStrategy,
+		parentSpan:    span,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DisconnectAnalyticsLinkOptions is the set of options available to the AnalyticsManager DisconnectLink operation.
+type DisconnectAnalyticsLinkOptions struct {
+	LinkName string
+
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// DisconnectLink disconnects an analytics link.
+func (am *AnalyticsIndexManager) DisconnectLink(opts *DisconnectAnalyticsLinkOptions) error {
+	if opts == nil {
+		opts = &DisconnectAnalyticsLinkOptions{}
+	}
+
+	span := am.tracer.StartSpan("DisconnectLink", nil).
+		SetTag("couchbase.service", "analytics")
+	defer span.Finish()
+
+	if opts.LinkName == "" {
+		opts.LinkName = "Local"
+	}
+
+	q := fmt.Sprintf("DISCONNECT LINK %s", opts.LinkName)
+	_, err := am.doAnalyticsQuery(q, &AnalyticsOptions{
+		Timeout:       opts.Timeout,
+		RetryStrategy: opts.RetryStrategy,
+		parentSpan:    span,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetPendingMutationsAnalyticsOptions is the set of options available to the analytics manager GetPendingMutations operation.
+type GetPendingMutationsAnalyticsOptions struct {
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// GetPendingMutations returns the number of pending mutations for all indexes in the form of dataverse.dataset:mutations.
+func (am *AnalyticsIndexManager) GetPendingMutations(opts *GetPendingMutationsAnalyticsOptions) (map[string]uint64, error) {
+	if opts == nil {
+		opts = &GetPendingMutationsAnalyticsOptions{}
+	}
+
+	span := am.tracer.StartSpan("GetPendingMutations", nil).
+		SetTag("couchbase.service", "analytics")
+	defer span.Finish()
+
+	timeout := opts.Timeout
+	if timeout == 0 {
+		timeout = am.globalTimeout
+	}
+
+	req := mgmtRequest{
+		Service:       ServiceTypeAnalytics,
+		Method:        "GET",
+		Path:          "/analytics/node/agg/stats/remaining",
+		IsIdempotent:  true,
+		RetryStrategy: opts.RetryStrategy,
+		Timeout:       timeout,
+		parentSpan:    span.Context(),
+	}
+	resp, err := am.doMgmtRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	// Close the body on every exit path; previously the early returns on
+	// a bad status code or a decode failure leaked the response body and
+	// its underlying connection.
+	defer ensureBodyClosed(resp.Body)
+
+	if resp.StatusCode != 200 {
+		return nil, makeMgmtBadStatusError("failed to get pending mutations", &req, resp)
+	}
+
+	// The endpoint returns a flat map of index identifier -> remaining mutation count.
+	pending := make(map[string]uint64)
+	jsonDec := json.NewDecoder(resp.Body)
+	err = jsonDec.Decode(&pending)
+	if err != nil {
+		return nil, err
+	}
+
+	return pending, nil
+}
diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_analyticsquery.go b/vendor/github.com/couchbase/gocb/v2/cluster_analyticsquery.go
new file mode 100644
index 000000000000..8f0cba9eb8a1
--- /dev/null
+++ b/vendor/github.com/couchbase/gocb/v2/cluster_analyticsquery.go
@@ -0,0 +1,300 @@
+package gocb
+
+import (
+	"encoding/json"
+	"time"
+
+	gocbcore "github.com/couchbase/gocbcore/v9"
+)
+
+type jsonAnalyticsMetrics struct {
+	ElapsedTime      string `json:"elapsedTime"`
+	ExecutionTime    string `json:"executionTime"`
+	ResultCount      uint64 `json:"resultCount"`
+	ResultSize       uint64 `json:"resultSize"`
+	MutationCount    uint64 `json:"mutationCount,omitempty"`
+	SortCount        uint64 `json:"sortCount,omitempty"`
+	ErrorCount       uint64 `json:"errorCount,omitempty"`
+	WarningCount     uint64 `json:"warningCount,omitempty"`
+	ProcessedObjects uint64 `json:"processedObjects,omitempty"`
+}
+
+type 
jsonAnalyticsWarning struct { + Code uint32 `json:"code"` + Message string `json:"msg"` +} + +type jsonAnalyticsResponse struct { + RequestID string `json:"requestID"` + ClientContextID string `json:"clientContextID"` + Status string `json:"status"` + Warnings []jsonAnalyticsWarning `json:"warnings"` + Metrics jsonAnalyticsMetrics `json:"metrics"` + Signature interface{} `json:"signature"` +} + +// AnalyticsMetrics encapsulates various metrics gathered during a queries execution. +type AnalyticsMetrics struct { + ElapsedTime time.Duration + ExecutionTime time.Duration + ResultCount uint64 + ResultSize uint64 + MutationCount uint64 + SortCount uint64 + ErrorCount uint64 + WarningCount uint64 + ProcessedObjects uint64 +} + +func (metrics *AnalyticsMetrics) fromData(data jsonAnalyticsMetrics) error { + elapsedTime, err := time.ParseDuration(data.ElapsedTime) + if err != nil { + logDebugf("Failed to parse query metrics elapsed time: %s", err) + } + + executionTime, err := time.ParseDuration(data.ExecutionTime) + if err != nil { + logDebugf("Failed to parse query metrics execution time: %s", err) + } + + metrics.ElapsedTime = elapsedTime + metrics.ExecutionTime = executionTime + metrics.ResultCount = data.ResultCount + metrics.ResultSize = data.ResultSize + metrics.MutationCount = data.MutationCount + metrics.SortCount = data.SortCount + metrics.ErrorCount = data.ErrorCount + metrics.WarningCount = data.WarningCount + metrics.ProcessedObjects = data.ProcessedObjects + + return nil +} + +// AnalyticsWarning encapsulates any warnings returned by a query. +type AnalyticsWarning struct { + Code uint32 + Message string +} + +func (warning *AnalyticsWarning) fromData(data jsonAnalyticsWarning) error { + warning.Code = data.Code + warning.Message = data.Message + + return nil +} + +// AnalyticsMetaData provides access to the meta-data properties of a query result. 
+type AnalyticsMetaData struct { + RequestID string + ClientContextID string + Metrics AnalyticsMetrics + Signature interface{} + Warnings []AnalyticsWarning +} + +func (meta *AnalyticsMetaData) fromData(data jsonAnalyticsResponse) error { + metrics := AnalyticsMetrics{} + if err := metrics.fromData(data.Metrics); err != nil { + return err + } + + warnings := make([]AnalyticsWarning, len(data.Warnings)) + for wIdx, jsonWarning := range data.Warnings { + err := warnings[wIdx].fromData(jsonWarning) + if err != nil { + return err + } + } + + meta.RequestID = data.RequestID + meta.ClientContextID = data.ClientContextID + meta.Metrics = metrics + meta.Signature = data.Signature + meta.Warnings = warnings + + return nil +} + +// AnalyticsResult allows access to the results of a query. +type AnalyticsResult struct { + reader analyticsRowReader + + rowBytes []byte +} + +func newAnalyticsResult(reader analyticsRowReader) *AnalyticsResult { + return &AnalyticsResult{ + reader: reader, + } +} + +type analyticsRowReader interface { + NextRow() []byte + Err() error + MetaData() ([]byte, error) + Close() error +} + +// Next assigns the next result from the results into the value pointer, returning whether the read was successful. +func (r *AnalyticsResult) Next() bool { + rowBytes := r.reader.NextRow() + if rowBytes == nil { + return false + } + + r.rowBytes = rowBytes + return true +} + +// Row returns the value of the current row +func (r *AnalyticsResult) Row(valuePtr interface{}) error { + if r.rowBytes == nil { + return ErrNoResult + } + + if bytesPtr, ok := valuePtr.(*json.RawMessage); ok { + *bytesPtr = r.rowBytes + return nil + } + + return json.Unmarshal(r.rowBytes, valuePtr) +} + +// Err returns any errors that have occurred on the stream +func (r *AnalyticsResult) Err() error { + return r.reader.Err() +} + +// Close marks the results as closed, returning any errors that occurred during reading the results. 
+func (r *AnalyticsResult) Close() error { + return r.reader.Close() +} + +// One assigns the first value from the results into the value pointer. +// It will close the results but not before iterating through all remaining +// results, as such this should only be used for very small resultsets - ideally +// of, at most, length 1. +func (r *AnalyticsResult) One(valuePtr interface{}) error { + // Read the bytes from the first row + valueBytes := r.reader.NextRow() + if valueBytes == nil { + return ErrNoResult + } + + // Skip through the remaining rows + for r.reader.NextRow() != nil { + // do nothing with the row + } + + return json.Unmarshal(valueBytes, valuePtr) +} + +// MetaData returns any meta-data that was available from this query. Note that +// the meta-data will only be available once the object has been closed (either +// implicitly or explicitly). +func (r *AnalyticsResult) MetaData() (*AnalyticsMetaData, error) { + metaDataBytes, err := r.reader.MetaData() + if err != nil { + return nil, err + } + + var jsonResp jsonAnalyticsResponse + err = json.Unmarshal(metaDataBytes, &jsonResp) + if err != nil { + return nil, err + } + + var metaData AnalyticsMetaData + err = metaData.fromData(jsonResp) + if err != nil { + return nil, err + } + + return &metaData, nil +} + +// AnalyticsQuery executes the analytics query statement on the server. +func (c *Cluster) AnalyticsQuery(statement string, opts *AnalyticsOptions) (*AnalyticsResult, error) { + if opts == nil { + opts = &AnalyticsOptions{} + } + + span := c.tracer.StartSpan("Query", opts.parentSpan). 
+ SetTag("couchbase.service", "analytics") + defer span.Finish() + + timeout := opts.Timeout + if opts.Timeout == 0 { + timeout = c.timeoutsConfig.AnalyticsTimeout + } + deadline := time.Now().Add(timeout) + + retryStrategy := c.retryStrategyWrapper + if opts.RetryStrategy != nil { + retryStrategy = newRetryStrategyWrapper(opts.RetryStrategy) + } + + queryOpts, err := opts.toMap() + if err != nil { + return nil, AnalyticsError{ + InnerError: wrapError(err, "failed to generate query options"), + Statement: statement, + ClientContextID: opts.ClientContextID, + } + } + + var priorityInt int32 + if opts.Priority { + priorityInt = -1 + } + + queryOpts["statement"] = statement + + return c.execAnalyticsQuery(span, queryOpts, priorityInt, deadline, retryStrategy) +} + +func maybeGetAnalyticsOption(options map[string]interface{}, name string) string { + if value, ok := options[name].(string); ok { + return value + } + return "" +} + +func (c *Cluster) execAnalyticsQuery( + span requestSpan, + options map[string]interface{}, + priority int32, + deadline time.Time, + retryStrategy *retryStrategyWrapper, +) (*AnalyticsResult, error) { + provider, err := c.getAnalyticsProvider() + if err != nil { + return nil, AnalyticsError{ + InnerError: wrapError(err, "failed to get query provider"), + Statement: maybeGetAnalyticsOption(options, "statement"), + ClientContextID: maybeGetAnalyticsOption(options, "client_context_id"), + } + } + + reqBytes, err := json.Marshal(options) + if err != nil { + return nil, AnalyticsError{ + InnerError: wrapError(err, "failed to marshall query body"), + Statement: maybeGetAnalyticsOption(options, "statement"), + ClientContextID: maybeGetAnalyticsOption(options, "client_context_id"), + } + } + + res, err := provider.AnalyticsQuery(gocbcore.AnalyticsQueryOptions{ + Payload: reqBytes, + Priority: int(priority), + RetryStrategy: retryStrategy, + Deadline: deadline, + TraceContext: span.Context(), + }) + if err != nil { + return nil, 
maybeEnhanceAnalyticsError(err) + } + + return newAnalyticsResult(res), nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_bucketmgr.go b/vendor/github.com/couchbase/gocb/v2/cluster_bucketmgr.go new file mode 100644 index 000000000000..3ec0d215eccb --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_bucketmgr.go @@ -0,0 +1,600 @@ +package gocb + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "strings" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" +) + +// BucketType specifies the kind of bucket. +type BucketType string + +const ( + // CouchbaseBucketType indicates a Couchbase bucket type. + CouchbaseBucketType BucketType = "membase" + + // MemcachedBucketType indicates a Memcached bucket type. + MemcachedBucketType BucketType = "memcached" + + // EphemeralBucketType indicates an Ephemeral bucket type. + EphemeralBucketType BucketType = "ephemeral" +) + +// ConflictResolutionType specifies the kind of conflict resolution to use for a bucket. +type ConflictResolutionType string + +const ( + // ConflictResolutionTypeTimestamp specifies to use timestamp conflict resolution on the bucket. + ConflictResolutionTypeTimestamp ConflictResolutionType = "lww" + + // ConflictResolutionTypeSequenceNumber specifies to use sequence number conflict resolution on the bucket. + ConflictResolutionTypeSequenceNumber ConflictResolutionType = "seqno" +) + +// EvictionPolicyType specifies the kind of eviction policy to use for a bucket. +type EvictionPolicyType string + +const ( + // EvictionPolicyTypeFull specifies to use full eviction for a couchbase bucket. + EvictionPolicyTypeFull EvictionPolicyType = "fullEviction" + + // EvictionPolicyTypeValueOnly specifies to use value only eviction for a couchbase bucket. + EvictionPolicyTypeValueOnly EvictionPolicyType = "valueOnly" + + // EvictionPolicyTypeNotRecentlyUsed specifies to use not recently used (nru) eviction for an ephemeral bucket. 
+	// UNCOMMITTED: This API may change in the future.
+	EvictionPolicyTypeNotRecentlyUsed EvictionPolicyType = "nruEviction"
+
+	// EvictionPolicyTypeNoEviction specifies to use no eviction for an ephemeral bucket.
+	// UNCOMMITTED: This API may change in the future.
+	EvictionPolicyTypeNoEviction EvictionPolicyType = "noEviction"
+)
+
+// CompressionMode specifies the kind of compression to use for a bucket.
+type CompressionMode string
+
+const (
+	// CompressionModeOff specifies to use no compression for a bucket.
+	CompressionModeOff CompressionMode = "off"
+
+	// CompressionModePassive specifies to use passive compression for a bucket.
+	CompressionModePassive CompressionMode = "passive"
+
+	// CompressionModeActive specifies to use active compression for a bucket.
+	CompressionModeActive CompressionMode = "active"
+)
+
+type jsonBucketSettings struct {
+	Name        string `json:"name"`
+	Controllers struct {
+		Flush string `json:"flush"`
+	} `json:"controllers"`
+	ReplicaIndex bool `json:"replicaIndex"`
+	Quota        struct {
+		RAM    uint64 `json:"ram"`
+		RawRAM uint64 `json:"rawRAM"`
+	} `json:"quota"`
+	ReplicaNumber          uint32 `json:"replicaNumber"`
+	BucketType             string `json:"bucketType"`
+	ConflictResolutionType string `json:"conflictResolutionType"`
+	EvictionPolicy         string `json:"evictionPolicy"`
+	MaxTTL                 uint32 `json:"maxTTL"`
+	CompressionMode        string `json:"compressionMode"`
+}
+
+// BucketSettings holds information about the settings for a bucket.
+type BucketSettings struct {
+	Name                 string
+	FlushEnabled         bool
+	ReplicaIndexDisabled bool // inverted so that zero value matches server default.
+	RAMQuotaMB           uint64
+	NumReplicas          uint32     // NOTE: If not set this will set 0 replicas.
+	BucketType           BucketType // Defaults to CouchbaseBucketType.
+ EvictionPolicy EvictionPolicyType + MaxTTL time.Duration + CompressionMode CompressionMode +} + +func (bs *BucketSettings) fromData(data jsonBucketSettings) error { + bs.Name = data.Name + bs.FlushEnabled = data.Controllers.Flush != "" + bs.ReplicaIndexDisabled = !data.ReplicaIndex + bs.RAMQuotaMB = data.Quota.RawRAM / 1024 / 1024 + bs.NumReplicas = data.ReplicaNumber + bs.EvictionPolicy = EvictionPolicyType(data.EvictionPolicy) + bs.MaxTTL = time.Duration(data.MaxTTL) * time.Second + bs.CompressionMode = CompressionMode(data.CompressionMode) + + switch data.BucketType { + case "membase": + bs.BucketType = CouchbaseBucketType + case "memcached": + bs.BucketType = MemcachedBucketType + case "ephemeral": + bs.BucketType = EphemeralBucketType + default: + return errors.New("unrecognized bucket type string") + } + + return nil +} + +type bucketMgrErrorResp struct { + Errors map[string]string `json:"errors"` +} + +func (bm *BucketManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + logDebugf("Failed to read bucket manager response body: %s", err) + return nil + } + + if resp.StatusCode == 404 { + // If it was a 404 then there's no chance of the response body containing any structure + if strings.Contains(strings.ToLower(string(b)), "resource not found") { + return makeGenericMgmtError(ErrBucketNotFound, req, resp) + } + + return makeGenericMgmtError(errors.New(string(b)), req, resp) + } + + var mgrErr bucketMgrErrorResp + err = json.Unmarshal(b, &mgrErr) + if err != nil { + logDebugf("Failed to unmarshal error body: %s", err) + return makeGenericMgmtError(errors.New(string(b)), req, resp) + } + + var bodyErr error + var firstErr string + for _, err := range mgrErr.Errors { + firstErr = strings.ToLower(err) + break + } + + if strings.Contains(firstErr, "bucket with given name already exists") { + bodyErr = ErrBucketExists + } else { + bodyErr = errors.New(firstErr) + } + + return 
makeGenericMgmtError(bodyErr, req, resp) +} + +// Flush doesn't use the same body format as anything else... +func (bm *BucketManager) tryParseFlushErrorMessage(req *mgmtRequest, resp *mgmtResponse) error { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + logDebugf("Failed to read bucket manager response body: %s", err) + return makeMgmtBadStatusError("failed to flush bucket", req, resp) + } + + var bodyErrMsgs map[string]string + err = json.Unmarshal(b, &bodyErrMsgs) + if err != nil { + return errors.New(string(b)) + } + + if errMsg, ok := bodyErrMsgs["_"]; ok { + if strings.Contains(strings.ToLower(errMsg), "flush is disabled") { + return ErrBucketNotFlushable + } + } + + return errors.New(string(b)) +} + +// BucketManager provides methods for performing bucket management operations. +// See BucketManager for methods that allow creating and removing buckets themselves. +type BucketManager struct { + provider mgmtProvider + tracer requestTracer +} + +// GetBucketOptions is the set of options available to the bucket manager GetBucket operation. +type GetBucketOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetBucket returns settings for a bucket on the cluster. +func (bm *BucketManager) GetBucket(bucketName string, opts *GetBucketOptions) (*BucketSettings, error) { + if opts == nil { + opts = &GetBucketOptions{} + } + + span := bm.tracer.StartSpan("GetBucket", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + return bm.get(span.Context(), bucketName, opts.RetryStrategy, opts.Timeout) +} + +func (bm *BucketManager) get(tracectx requestSpanContext, bucketName string, + strategy RetryStrategy, timeout time.Duration) (*BucketSettings, error) { + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s", bucketName), + Method: "GET", + IsIdempotent: true, + RetryStrategy: strategy, + UniqueID: uuid.New().String(), + Timeout: timeout, + parentSpan: tracectx, + } + + resp, err := bm.provider.executeMgmtRequest(req) + if err != nil { + return nil, makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + bktErr := bm.tryParseErrorMessage(&req, resp) + if bktErr != nil { + return nil, bktErr + } + + return nil, makeMgmtBadStatusError("failed to get bucket", &req, resp) + } + + var bucketData jsonBucketSettings + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&bucketData) + if err != nil { + return nil, err + } + + var settings BucketSettings + err = settings.fromData(bucketData) + if err != nil { + return nil, err + } + + return &settings, nil +} + +// GetAllBucketsOptions is the set of options available to the bucket manager GetAll operation. +type GetAllBucketsOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllBuckets returns a list of all active buckets on the cluster. +func (bm *BucketManager) GetAllBuckets(opts *GetAllBucketsOptions) (map[string]BucketSettings, error) { + if opts == nil { + opts = &GetAllBucketsOptions{} + } + + span := bm.tracer.StartSpan("GetAllBuckets", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: "/pools/default/buckets", + Method: "GET", + IsIdempotent: true, + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := bm.provider.executeMgmtRequest(req) + if err != nil { + return nil, makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + bktErr := bm.tryParseErrorMessage(&req, resp) + if bktErr != nil { + return nil, bktErr + } + + return nil, makeMgmtBadStatusError("failed to get all buckets", &req, resp) + } + + var bucketsData []*jsonBucketSettings + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&bucketsData) + if err != nil { + return nil, err + } + + buckets := make(map[string]BucketSettings, len(bucketsData)) + for _, bucketData := range bucketsData { + var bucket BucketSettings + err := bucket.fromData(*bucketData) + if err != nil { + return nil, err + } + + buckets[bucket.Name] = bucket + } + + return buckets, nil +} + +// CreateBucketSettings are the settings available when creating a bucket. +type CreateBucketSettings struct { + BucketSettings + ConflictResolutionType ConflictResolutionType +} + +// CreateBucketOptions is the set of options available to the bucket manager CreateBucket operation. +type CreateBucketOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// CreateBucket creates a bucket on the cluster. +func (bm *BucketManager) CreateBucket(settings CreateBucketSettings, opts *CreateBucketOptions) error { + if opts == nil { + opts = &CreateBucketOptions{} + } + + span := bm.tracer.StartSpan("CreateBucket", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + posts, err := bm.settingsToPostData(&settings.BucketSettings) + if err != nil { + return err + } + + if settings.ConflictResolutionType != "" { + posts.Add("conflictResolutionType", string(settings.ConflictResolutionType)) + } + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: "/pools/default/buckets", + Method: "POST", + Body: []byte(posts.Encode()), + ContentType: "application/x-www-form-urlencoded", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := bm.provider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 202 { + bktErr := bm.tryParseErrorMessage(&req, resp) + if bktErr != nil { + return bktErr + } + + return makeMgmtBadStatusError("failed to create bucket", &req, resp) + } + + return nil +} + +// UpdateBucketOptions is the set of options available to the bucket manager UpdateBucket operation. +type UpdateBucketOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// UpdateBucket updates a bucket on the cluster. +func (bm *BucketManager) UpdateBucket(settings BucketSettings, opts *UpdateBucketOptions) error { + if opts == nil { + opts = &UpdateBucketOptions{} + } + + span := bm.tracer.StartSpan("UpdateBucket", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + posts, err := bm.settingsToPostData(&settings) + if err != nil { + return err + } + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s", settings.Name), + Method: "POST", + Body: []byte(posts.Encode()), + ContentType: "application/x-www-form-urlencoded", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := bm.provider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + bktErr := bm.tryParseErrorMessage(&req, resp) + if bktErr != nil { + return bktErr + } + + return makeMgmtBadStatusError("failed to update bucket", &req, resp) + } + + return nil +} + +// DropBucketOptions is the set of options available to the bucket manager DropBucket operation. +type DropBucketOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropBucket will delete a bucket from the cluster by name. +func (bm *BucketManager) DropBucket(name string, opts *DropBucketOptions) error { + if opts == nil { + opts = &DropBucketOptions{} + } + + span := bm.tracer.StartSpan("DropBucket", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s", name), + Method: "DELETE", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := bm.provider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + bktErr := bm.tryParseErrorMessage(&req, resp) + if bktErr != nil { + return bktErr + } + + return makeMgmtBadStatusError("failed to drop bucket", &req, resp) + } + + return nil +} + +// FlushBucketOptions is the set of options available to the bucket manager FlushBucket operation. +type FlushBucketOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// FlushBucket will delete all the of the data from a bucket. +// Keep in mind that you must have flushing enabled in the buckets configuration. +func (bm *BucketManager) FlushBucket(name string, opts *FlushBucketOptions) error { + if opts == nil { + opts = &FlushBucketOptions{} + } + + span := bm.tracer.StartSpan("FlushBucket", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Path: fmt.Sprintf("/pools/default/buckets/%s/controller/doFlush", name), + Method: "POST", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := bm.provider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + return bm.tryParseFlushErrorMessage(&req, resp) + } + + return nil +} + +func (bm *BucketManager) settingsToPostData(settings *BucketSettings) (url.Values, error) { + posts := url.Values{} + + if settings.Name == "" { + return nil, makeInvalidArgumentsError("Name invalid, must be set.") + } + + if settings.RAMQuotaMB < 100 { + return nil, makeInvalidArgumentsError("Memory quota invalid, must be greater than 100MB") + } + + if settings.MaxTTL > 0 && settings.BucketType == MemcachedBucketType { + return nil, makeInvalidArgumentsError("maxTTL is not supported for memcached buckets") + } + + posts.Add("name", settings.Name) + // posts.Add("saslPassword", settings.Password) + + if settings.FlushEnabled { + posts.Add("flushEnabled", "1") + } else { + posts.Add("flushEnabled", "0") + } + + // replicaIndex can't be set at all on ephemeral buckets. 
+ if settings.BucketType != EphemeralBucketType { + if settings.ReplicaIndexDisabled { + posts.Add("replicaIndex", "0") + } else { + posts.Add("replicaIndex", "1") + } + } + + switch settings.BucketType { + case CouchbaseBucketType: + posts.Add("bucketType", string(settings.BucketType)) + posts.Add("replicaNumber", fmt.Sprintf("%d", settings.NumReplicas)) + case MemcachedBucketType: + posts.Add("bucketType", string(settings.BucketType)) + if settings.NumReplicas > 0 { + return nil, makeInvalidArgumentsError("replicas cannot be used with memcached buckets") + } + case EphemeralBucketType: + posts.Add("bucketType", string(settings.BucketType)) + posts.Add("replicaNumber", fmt.Sprintf("%d", settings.NumReplicas)) + default: + return nil, makeInvalidArgumentsError("Unrecognized bucket type") + } + + posts.Add("ramQuotaMB", fmt.Sprintf("%d", settings.RAMQuotaMB)) + + if settings.EvictionPolicy != "" { + switch settings.BucketType { + case MemcachedBucketType: + return nil, makeInvalidArgumentsError("eviction policy is not valid for memcached buckets") + case CouchbaseBucketType: + if settings.EvictionPolicy == EvictionPolicyTypeNoEviction || settings.EvictionPolicy == EvictionPolicyTypeNotRecentlyUsed { + return nil, makeInvalidArgumentsError("eviction policy is not valid for couchbase buckets") + } + case EphemeralBucketType: + if settings.EvictionPolicy == EvictionPolicyTypeFull || settings.EvictionPolicy == EvictionPolicyTypeValueOnly { + return nil, makeInvalidArgumentsError("eviction policy is not valid for ephemeral buckets") + } + } + posts.Add("evictionPolicy", string(settings.EvictionPolicy)) + } + + if settings.MaxTTL > 0 { + posts.Add("maxTTL", fmt.Sprintf("%d", settings.MaxTTL/time.Second)) + } + + if settings.CompressionMode != "" { + posts.Add("compressionMode", string(settings.CompressionMode)) + } + + return posts, nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_diag.go b/vendor/github.com/couchbase/gocb/v2/cluster_diag.go new file mode 
100644 index 000000000000..a1eb2da5b975 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_diag.go @@ -0,0 +1,128 @@ +package gocb + +import ( + "encoding/json" + "time" + + "github.com/couchbase/gocbcore/v9" + + "github.com/google/uuid" +) + +// EndPointDiagnostics represents a single entry in a diagnostics report. +type EndPointDiagnostics struct { + Type ServiceType + ID string + Local string + Remote string + LastActivity time.Time + State EndpointState + Namespace string +} + +// DiagnosticsResult encapsulates the results of a Diagnostics operation. +type DiagnosticsResult struct { + ID string + Services map[string][]EndPointDiagnostics + sdk string + State ClusterState +} + +type jsonDiagnosticEntry struct { + ID string `json:"id,omitempty"` + LastActivityUs uint64 `json:"last_activity_us,omitempty"` + Remote string `json:"remote,omitempty"` + Local string `json:"local,omitempty"` + State string `json:"state,omitempty"` + Details string `json:"details,omitempty"` + Namespace string `json:"namespace,omitempty"` +} + +type jsonDiagnosticReport struct { + Version int16 `json:"version"` + SDK string `json:"sdk,omitempty"` + ID string `json:"id,omitempty"` + Services map[string][]jsonDiagnosticEntry `json:"services"` + State string `json:"state"` +} + +// MarshalJSON generates a JSON representation of this diagnostics report. 
+func (report *DiagnosticsResult) MarshalJSON() ([]byte, error) { + jsonReport := jsonDiagnosticReport{ + Version: 2, + SDK: report.sdk, + ID: report.ID, + Services: make(map[string][]jsonDiagnosticEntry), + State: clusterStateToString(report.State), + } + + for _, serviceType := range report.Services { + for _, service := range serviceType { + serviceStr := serviceTypeToString(service.Type) + stateStr := endpointStateToString(service.State) + + jsonReport.Services[serviceStr] = append(jsonReport.Services[serviceStr], jsonDiagnosticEntry{ + ID: service.ID, + LastActivityUs: uint64(time.Since(service.LastActivity).Nanoseconds()), + Remote: service.Remote, + Local: service.Local, + State: stateStr, + Details: "", + Namespace: service.Namespace, + }) + } + } + + return json.Marshal(&jsonReport) +} + +// DiagnosticsOptions are the options that are available for use with the Diagnostics operation. +type DiagnosticsOptions struct { + ReportID string +} + +// Diagnostics returns information about the internal state of the SDK. 
+func (c *Cluster) Diagnostics(opts *DiagnosticsOptions) (*DiagnosticsResult, error) { + if opts == nil { + opts = &DiagnosticsOptions{} + } + + if opts.ReportID == "" { + opts.ReportID = uuid.New().String() + } + + provider, err := c.getDiagnosticsProvider() + if err != nil { + return nil, err + } + + agentReport, err := provider.Diagnostics(gocbcore.DiagnosticsOptions{}) + if err != nil { + return nil, err + } + + report := &DiagnosticsResult{ + ID: opts.ReportID, + Services: make(map[string][]EndPointDiagnostics), + sdk: Identifier(), + State: ClusterState(agentReport.State), + } + + report.Services["kv"] = make([]EndPointDiagnostics, 0) + + for _, conn := range agentReport.MemdConns { + state := EndpointState(conn.State) + + report.Services["kv"] = append(report.Services["kv"], EndPointDiagnostics{ + Type: ServiceTypeKeyValue, + State: state, + Local: conn.LocalAddr, + Remote: conn.RemoteAddr, + LastActivity: conn.LastActivity, + Namespace: conn.Scope, + ID: conn.ID, + }) + } + + return report, nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_ping.go b/vendor/github.com/couchbase/gocb/v2/cluster_ping.go new file mode 100644 index 000000000000..63b256aa5b72 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_ping.go @@ -0,0 +1,92 @@ +package gocb + +import ( + "time" + + "github.com/couchbase/gocbcore/v9" + "github.com/google/uuid" +) + +// Ping will ping a list of services and verify they are active and +// responding in an acceptable period of time. 
+func (c *Cluster) Ping(opts *PingOptions) (*PingResult, error) { + if opts == nil { + opts = &PingOptions{} + } + + provider, err := c.getDiagnosticsProvider() + if err != nil { + return nil, err + } + + return ping(provider, opts, c.timeoutsConfig) +} + +func ping(provider diagnosticsProvider, opts *PingOptions, timeouts TimeoutsConfig) (*PingResult, error) { + services := opts.ServiceTypes + + gocbcoreServices := make([]gocbcore.ServiceType, len(services)) + for i, svc := range services { + gocbcoreServices[i] = gocbcore.ServiceType(svc) + } + + coreopts := gocbcore.PingOptions{ + ServiceTypes: gocbcoreServices, + } + now := time.Now() + timeout := opts.Timeout + if timeout == 0 { + coreopts.KVDeadline = now.Add(timeouts.KVTimeout) + coreopts.CapiDeadline = now.Add(timeouts.ViewTimeout) + coreopts.N1QLDeadline = now.Add(timeouts.QueryTimeout) + coreopts.CbasDeadline = now.Add(timeouts.AnalyticsTimeout) + coreopts.FtsDeadline = now.Add(timeouts.SearchTimeout) + coreopts.MgmtDeadline = now.Add(timeouts.ManagementTimeout) + } else { + coreopts.KVDeadline = now.Add(timeout) + coreopts.CapiDeadline = now.Add(timeout) + coreopts.N1QLDeadline = now.Add(timeout) + coreopts.CbasDeadline = now.Add(timeout) + coreopts.FtsDeadline = now.Add(timeout) + coreopts.MgmtDeadline = now.Add(timeout) + } + + id := opts.ReportID + if id == "" { + id = uuid.New().String() + } + + result, err := provider.Ping(coreopts) + if err != nil { + return nil, err + } + + reportSvcs := make(map[ServiceType][]EndpointPingReport) + for svcType, svc := range result.Services { + st := ServiceType(svcType) + + svcs := make([]EndpointPingReport, len(svc)) + for i, rep := range svc { + var errStr string + if rep.Error != nil { + errStr = rep.Error.Error() + } + svcs[i] = EndpointPingReport{ + ID: rep.ID, + Remote: rep.Endpoint, + State: PingState(rep.State), + Error: errStr, + Namespace: rep.Scope, + Latency: rep.Latency, + } + } + + reportSvcs[st] = svcs + } + + return &PingResult{ + ID: id, + sdk: 
Identifier() + " " + "gocbcore/" + gocbcore.Version(), + Services: reportSvcs, + }, nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_query.go b/vendor/github.com/couchbase/gocb/v2/cluster_query.go new file mode 100644 index 000000000000..ee392c96bdb5 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_query.go @@ -0,0 +1,314 @@ +package gocb + +import ( + "encoding/json" + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +type jsonQueryMetrics struct { + ElapsedTime string `json:"elapsedTime"` + ExecutionTime string `json:"executionTime"` + ResultCount uint64 `json:"resultCount"` + ResultSize uint64 `json:"resultSize"` + MutationCount uint64 `json:"mutationCount,omitempty"` + SortCount uint64 `json:"sortCount,omitempty"` + ErrorCount uint64 `json:"errorCount,omitempty"` + WarningCount uint64 `json:"warningCount,omitempty"` +} + +type jsonQueryWarning struct { + Code uint32 `json:"code"` + Message string `json:"msg"` +} + +type jsonQueryResponse struct { + RequestID string `json:"requestID"` + ClientContextID string `json:"clientContextID"` + Status QueryStatus `json:"status"` + Warnings []jsonQueryWarning `json:"warnings"` + Metrics jsonQueryMetrics `json:"metrics"` + Profile interface{} `json:"profile"` + Signature interface{} `json:"signature"` + Prepared string `json:"prepared"` +} + +// QueryMetrics encapsulates various metrics gathered during a queries execution. 
+type QueryMetrics struct { + ElapsedTime time.Duration + ExecutionTime time.Duration + ResultCount uint64 + ResultSize uint64 + MutationCount uint64 + SortCount uint64 + ErrorCount uint64 + WarningCount uint64 +} + +func (metrics *QueryMetrics) fromData(data jsonQueryMetrics) error { + elapsedTime, err := time.ParseDuration(data.ElapsedTime) + if err != nil { + logDebugf("Failed to parse query metrics elapsed time: %s", err) + } + + executionTime, err := time.ParseDuration(data.ExecutionTime) + if err != nil { + logDebugf("Failed to parse query metrics execution time: %s", err) + } + + metrics.ElapsedTime = elapsedTime + metrics.ExecutionTime = executionTime + metrics.ResultCount = data.ResultCount + metrics.ResultSize = data.ResultSize + metrics.MutationCount = data.MutationCount + metrics.SortCount = data.SortCount + metrics.ErrorCount = data.ErrorCount + metrics.WarningCount = data.WarningCount + + return nil +} + +// QueryWarning encapsulates any warnings returned by a query. +type QueryWarning struct { + Code uint32 + Message string +} + +func (warning *QueryWarning) fromData(data jsonQueryWarning) error { + warning.Code = data.Code + warning.Message = data.Message + + return nil +} + +// QueryMetaData provides access to the meta-data properties of a query result. 
+type QueryMetaData struct { + RequestID string + ClientContextID string + Status QueryStatus + Metrics QueryMetrics + Signature interface{} + Warnings []QueryWarning + Profile interface{} + + preparedName string +} + +func (meta *QueryMetaData) fromData(data jsonQueryResponse) error { + metrics := QueryMetrics{} + if err := metrics.fromData(data.Metrics); err != nil { + return err + } + + warnings := make([]QueryWarning, len(data.Warnings)) + for wIdx, jsonWarning := range data.Warnings { + err := warnings[wIdx].fromData(jsonWarning) + if err != nil { + return err + } + } + + meta.RequestID = data.RequestID + meta.ClientContextID = data.ClientContextID + meta.Status = data.Status + meta.Metrics = metrics + meta.Signature = data.Signature + meta.Warnings = warnings + meta.Profile = data.Profile + meta.preparedName = data.Prepared + + return nil +} + +// QueryResult allows access to the results of a query. +type QueryResult struct { + reader queryRowReader + + rowBytes []byte +} + +func newQueryResult(reader queryRowReader) *QueryResult { + return &QueryResult{ + reader: reader, + } +} + +// Next assigns the next result from the results into the value pointer, returning whether the read was successful. +func (r *QueryResult) Next() bool { + rowBytes := r.reader.NextRow() + if rowBytes == nil { + return false + } + + r.rowBytes = rowBytes + return true +} + +// Row returns the contents of the current row +func (r *QueryResult) Row(valuePtr interface{}) error { + if r.rowBytes == nil { + return ErrNoResult + } + + if bytesPtr, ok := valuePtr.(*json.RawMessage); ok { + *bytesPtr = r.rowBytes + return nil + } + + return json.Unmarshal(r.rowBytes, valuePtr) +} + +// Err returns any errors that have occurred on the stream +func (r *QueryResult) Err() error { + return r.reader.Err() +} + +// Close marks the results as closed, returning any errors that occurred during reading the results. 
+func (r *QueryResult) Close() error { + return r.reader.Close() +} + +// One assigns the first value from the results into the value pointer. +// It will close the results but not before iterating through all remaining +// results, as such this should only be used for very small resultsets - ideally +// of, at most, length 1. +func (r *QueryResult) One(valuePtr interface{}) error { + // Read the bytes from the first row + valueBytes := r.reader.NextRow() + if valueBytes == nil { + return ErrNoResult + } + + // Skip through the remaining rows + for r.reader.NextRow() != nil { + // do nothing with the row + } + + return json.Unmarshal(valueBytes, valuePtr) +} + +// MetaData returns any meta-data that was available from this query. Note that +// the meta-data will only be available once the object has been closed (either +// implicitly or explicitly). +func (r *QueryResult) MetaData() (*QueryMetaData, error) { + metaDataBytes, err := r.reader.MetaData() + if err != nil { + return nil, err + } + + var jsonResp jsonQueryResponse + err = json.Unmarshal(metaDataBytes, &jsonResp) + if err != nil { + return nil, err + } + + var metaData QueryMetaData + err = metaData.fromData(jsonResp) + if err != nil { + return nil, err + } + + return &metaData, nil +} + +type queryRowReader interface { + NextRow() []byte + Err() error + MetaData() ([]byte, error) + Close() error + PreparedName() (string, error) +} + +// Query executes the query statement on the server. +func (c *Cluster) Query(statement string, opts *QueryOptions) (*QueryResult, error) { + if opts == nil { + opts = &QueryOptions{} + } + + span := c.tracer.StartSpan("Query", opts.parentSpan). 
+ SetTag("couchbase.service", "query") + defer span.Finish() + + timeout := opts.Timeout + if timeout == 0 { + timeout = c.timeoutsConfig.QueryTimeout + } + deadline := time.Now().Add(timeout) + + retryStrategy := c.retryStrategyWrapper + if opts.RetryStrategy != nil { + retryStrategy = newRetryStrategyWrapper(opts.RetryStrategy) + } + + queryOpts, err := opts.toMap() + if err != nil { + return nil, QueryError{ + InnerError: wrapError(err, "failed to generate query options"), + Statement: statement, + ClientContextID: opts.ClientContextID, + } + } + + queryOpts["statement"] = statement + + return c.execN1qlQuery(span, queryOpts, deadline, retryStrategy, opts.Adhoc) +} + +func maybeGetQueryOption(options map[string]interface{}, name string) string { + if value, ok := options[name].(string); ok { + return value + } + return "" +} + +func (c *Cluster) execN1qlQuery( + span requestSpan, + options map[string]interface{}, + deadline time.Time, + retryStrategy *retryStrategyWrapper, + adHoc bool, +) (*QueryResult, error) { + provider, err := c.getQueryProvider() + if err != nil { + return nil, QueryError{ + InnerError: wrapError(err, "failed to get query provider"), + Statement: maybeGetQueryOption(options, "statement"), + ClientContextID: maybeGetQueryOption(options, "client_context_id"), + } + } + + eSpan := c.tracer.StartSpan("request_encoding", span.Context()) + reqBytes, err := json.Marshal(options) + eSpan.Finish() + if err != nil { + return nil, QueryError{ + InnerError: wrapError(err, "failed to marshall query body"), + Statement: maybeGetQueryOption(options, "statement"), + ClientContextID: maybeGetQueryOption(options, "client_context_id"), + } + } + + var res queryRowReader + var qErr error + if adHoc { + res, qErr = provider.N1QLQuery(gocbcore.N1QLQueryOptions{ + Payload: reqBytes, + RetryStrategy: retryStrategy, + Deadline: deadline, + TraceContext: span.Context(), + }) + } else { + res, qErr = provider.PreparedN1QLQuery(gocbcore.N1QLQueryOptions{ + Payload: 
reqBytes, + RetryStrategy: retryStrategy, + Deadline: deadline, + TraceContext: span.Context(), + }) + } + if qErr != nil { + return nil, maybeEnhanceQueryError(qErr) + } + + return newQueryResult(res), nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_queryindexes.go b/vendor/github.com/couchbase/gocb/v2/cluster_queryindexes.go new file mode 100644 index 000000000000..2b0bf6d18bae --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_queryindexes.go @@ -0,0 +1,558 @@ +package gocb + +import ( + "encoding/json" + "errors" + "regexp" + "strings" + "time" +) + +// QueryIndexManager provides methods for performing Couchbase query index management. +type QueryIndexManager struct { + provider queryIndexQueryProvider + + globalTimeout time.Duration + tracer requestTracer +} + +type queryIndexQueryProvider interface { + Query(statement string, opts *QueryOptions) (*QueryResult, error) +} + +func (qm *QueryIndexManager) tryParseErrorMessage(err error) error { + var qErr *QueryError + if !errors.As(err, &qErr) { + return err + } + + if len(qErr.Errors) == 0 { + return err + } + + firstErr := qErr.Errors[0] + var innerErr error + // The server doesn't return meaningful error codes when it comes to index management so we need to go spelunking. + msg := strings.ToLower(firstErr.Message) + if match, err := regexp.MatchString(".*?ndex .*? not found.*", msg); err == nil && match { + innerErr = ErrIndexNotFound + } else if match, err := regexp.MatchString(".*?ndex .*? 
already exists.*", msg); err == nil && match { + innerErr = ErrIndexExists + } + + if innerErr == nil { + return err + } + + return QueryError{ + InnerError: innerErr, + Statement: qErr.Statement, + ClientContextID: qErr.ClientContextID, + Errors: qErr.Errors, + Endpoint: qErr.Endpoint, + RetryReasons: qErr.RetryReasons, + RetryAttempts: qErr.RetryAttempts, + } +} + +func (qm *QueryIndexManager) doQuery(q string, opts *QueryOptions) ([][]byte, error) { + if opts.Timeout == 0 { + opts.Timeout = qm.globalTimeout + } + + result, err := qm.provider.Query(q, opts) + if err != nil { + return nil, qm.tryParseErrorMessage(err) + } + + var rows [][]byte + for result.Next() { + var row json.RawMessage + err := result.Row(&row) + if err != nil { + logWarnf("management operation failed to read row: %s", err) + } else { + rows = append(rows, row) + } + } + err = result.Err() + if err != nil { + return nil, qm.tryParseErrorMessage(err) + } + + return rows, nil +} + +type jsonQueryIndex struct { + Name string `json:"name"` + IsPrimary bool `json:"is_primary"` + Type QueryIndexType `json:"using"` + State string `json:"state"` + Keyspace string `json:"keyspace_id"` + Namespace string `json:"namespace_id"` + IndexKey []string `json:"index_key"` + Condition string `json:"condition"` +} + +// QueryIndex represents a Couchbase GSI index. 
+type QueryIndex struct { + Name string + IsPrimary bool + Type QueryIndexType + State string + Keyspace string + Namespace string + IndexKey []string + Condition string +} + +func (index *QueryIndex) fromData(data jsonQueryIndex) error { + index.Name = data.Name + index.IsPrimary = data.IsPrimary + index.Type = data.Type + index.State = data.State + index.Keyspace = data.Keyspace + index.Namespace = data.Namespace + index.IndexKey = data.IndexKey + index.Condition = data.Condition + + return nil +} + +type createQueryIndexOptions struct { + IgnoreIfExists bool + Deferred bool + + Timeout time.Duration + RetryStrategy RetryStrategy +} + +func (qm *QueryIndexManager) createIndex( + tracectx requestSpanContext, + bucketName, indexName string, + fields []string, + opts createQueryIndexOptions, +) error { + var qs string + + if len(fields) == 0 { + qs += "CREATE PRIMARY INDEX" + } else { + qs += "CREATE INDEX" + } + if indexName != "" { + qs += " `" + indexName + "`" + } + qs += " ON `" + bucketName + "`" + if len(fields) > 0 { + qs += " (" + for i := 0; i < len(fields); i++ { + if i > 0 { + qs += ", " + } + qs += "`" + fields[i] + "`" + } + qs += ")" + } + if opts.Deferred { + qs += " WITH {\"defer_build\": true}" + } + + _, err := qm.doQuery(qs, &QueryOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: tracectx, + }) + if err == nil { + return nil + } + + if opts.IgnoreIfExists && errors.Is(err, ErrIndexExists) { + return nil + } + + return err +} + +// CreateQueryIndexOptions is the set of options available to the query indexes CreateIndex operation. +type CreateQueryIndexOptions struct { + IgnoreIfExists bool + Deferred bool + + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// CreateIndex creates an index over the specified fields. 
+func (qm *QueryIndexManager) CreateIndex(bucketName, indexName string, fields []string, opts *CreateQueryIndexOptions) error { + if opts == nil { + opts = &CreateQueryIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{ + message: "an invalid index name was specified", + } + } + if len(fields) <= 0 { + return invalidArgumentsError{ + message: "you must specify at least one field to index", + } + } + + span := qm.tracer.StartSpan("CreateIndex", nil). + SetTag("couchbase.service", "query") + defer span.Finish() + + return qm.createIndex(span.Context(), bucketName, indexName, fields, createQueryIndexOptions{ + IgnoreIfExists: opts.IgnoreIfExists, + Deferred: opts.Deferred, + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + }) +} + +// CreatePrimaryQueryIndexOptions is the set of options available to the query indexes CreatePrimaryIndex operation. +type CreatePrimaryQueryIndexOptions struct { + IgnoreIfExists bool + Deferred bool + CustomName string + + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// CreatePrimaryIndex creates a primary index. An empty customName uses the default naming. +func (qm *QueryIndexManager) CreatePrimaryIndex(bucketName string, opts *CreatePrimaryQueryIndexOptions) error { + if opts == nil { + opts = &CreatePrimaryQueryIndexOptions{} + } + + span := qm.tracer.StartSpan("CreatePrimaryIndex", nil). 
+ SetTag("couchbase.service", "query") + defer span.Finish() + + return qm.createIndex( + span.Context(), + bucketName, + opts.CustomName, + nil, + createQueryIndexOptions{ + IgnoreIfExists: opts.IgnoreIfExists, + Deferred: opts.Deferred, + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + }) +} + +type dropQueryIndexOptions struct { + IgnoreIfNotExists bool + + Timeout time.Duration + RetryStrategy RetryStrategy +} + +func (qm *QueryIndexManager) dropIndex( + tracectx requestSpanContext, + bucketName, indexName string, + opts dropQueryIndexOptions, +) error { + var qs string + + if indexName == "" { + qs += "DROP PRIMARY INDEX ON `" + bucketName + "`" + } else { + qs += "DROP INDEX `" + bucketName + "`.`" + indexName + "`" + } + + _, err := qm.doQuery(qs, &QueryOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: tracectx, + }) + if err == nil { + return nil + } + + if opts.IgnoreIfNotExists && errors.Is(err, ErrIndexNotFound) { + return nil + } + + return err +} + +// DropQueryIndexOptions is the set of options available to the query indexes DropIndex operation. +type DropQueryIndexOptions struct { + IgnoreIfNotExists bool + + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropIndex drops a specific index by name. +func (qm *QueryIndexManager) DropIndex(bucketName, indexName string, opts *DropQueryIndexOptions) error { + if opts == nil { + opts = &DropQueryIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{ + message: "an invalid index name was specified", + } + } + + span := qm.tracer.StartSpan("DropIndex", nil). 
+ SetTag("couchbase.service", "query") + defer span.Finish() + + return qm.dropIndex( + span.Context(), + bucketName, + indexName, + dropQueryIndexOptions{ + IgnoreIfNotExists: opts.IgnoreIfNotExists, + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + }) +} + +// DropPrimaryQueryIndexOptions is the set of options available to the query indexes DropPrimaryIndex operation. +type DropPrimaryQueryIndexOptions struct { + IgnoreIfNotExists bool + CustomName string + + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropPrimaryIndex drops the primary index. Pass an empty customName for unnamed primary indexes. +func (qm *QueryIndexManager) DropPrimaryIndex(bucketName string, opts *DropPrimaryQueryIndexOptions) error { + if opts == nil { + opts = &DropPrimaryQueryIndexOptions{} + } + + span := qm.tracer.StartSpan("DropPrimaryIndex", nil). + SetTag("couchbase.service", "query") + defer span.Finish() + + return qm.dropIndex( + span.Context(), + bucketName, + opts.CustomName, + dropQueryIndexOptions{ + IgnoreIfNotExists: opts.IgnoreIfNotExists, + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + }) +} + +// GetAllQueryIndexesOptions is the set of options available to the query indexes GetAllIndexes operation. +type GetAllQueryIndexesOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllIndexes returns a list of all currently registered indexes. +func (qm *QueryIndexManager) GetAllIndexes(bucketName string, opts *GetAllQueryIndexesOptions) ([]QueryIndex, error) { + if opts == nil { + opts = &GetAllQueryIndexesOptions{} + } + + span := qm.tracer.StartSpan("GetAllIndexes", nil). 
+ SetTag("couchbase.service", "query") + defer span.Finish() + + return qm.getAllIndexes(span.Context(), bucketName, opts) +} + +func (qm *QueryIndexManager) getAllIndexes( + tracectx requestSpanContext, + bucketName string, + opts *GetAllQueryIndexesOptions, +) ([]QueryIndex, error) { + q := "SELECT `indexes`.* FROM system:indexes WHERE keyspace_id=? AND `using`=\"gsi\"" + rows, err := qm.doQuery(q, &QueryOptions{ + PositionalParameters: []interface{}{bucketName}, + Readonly: true, + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: tracectx, + }) + if err != nil { + return nil, err + } + + var indexes []QueryIndex + for _, row := range rows { + var jsonIdx jsonQueryIndex + err := json.Unmarshal(row, &jsonIdx) + if err != nil { + return nil, err + } + + var index QueryIndex + err = index.fromData(jsonIdx) + if err != nil { + return nil, err + } + + indexes = append(indexes, index) + } + + return indexes, nil +} + +// BuildDeferredQueryIndexOptions is the set of options available to the query indexes BuildDeferredIndexes operation. +type BuildDeferredQueryIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// BuildDeferredIndexes builds all indexes which are currently in deferred state. +func (qm *QueryIndexManager) BuildDeferredIndexes(bucketName string, opts *BuildDeferredQueryIndexOptions) ([]string, error) { + if opts == nil { + opts = &BuildDeferredQueryIndexOptions{} + } + + span := qm.tracer.StartSpan("BuildDeferredIndexes", nil). 
+ SetTag("couchbase.service", "query") + defer span.Finish() + + indexList, err := qm.getAllIndexes( + span.Context(), + bucketName, + &GetAllQueryIndexesOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + }) + if err != nil { + return nil, err + } + + var deferredList []string + for i := 0; i < len(indexList); i++ { + var index = indexList[i] + if index.State == "deferred" || index.State == "pending" { + deferredList = append(deferredList, index.Name) + } + } + + if len(deferredList) == 0 { + // Don't try to build an empty index list + return nil, nil + } + + var qs string + qs += "BUILD INDEX ON `" + bucketName + "`(" + for i := 0; i < len(deferredList); i++ { + if i > 0 { + qs += ", " + } + qs += "`" + deferredList[i] + "`" + } + qs += ")" + + _, err = qm.doQuery(qs, &QueryOptions{ + Timeout: opts.Timeout, + RetryStrategy: opts.RetryStrategy, + parentSpan: span, + }) + if err != nil { + return nil, err + } + + return deferredList, nil +} + +func checkIndexesActive(indexes []QueryIndex, checkList []string) (bool, error) { + var checkIndexes []QueryIndex + for i := 0; i < len(checkList); i++ { + indexName := checkList[i] + + for j := 0; j < len(indexes); j++ { + if indexes[j].Name == indexName { + checkIndexes = append(checkIndexes, indexes[j]) + break + } + } + } + + if len(checkIndexes) != len(checkList) { + return false, ErrIndexNotFound + } + + for i := 0; i < len(checkIndexes); i++ { + if checkIndexes[i].State != "online" { + return false, nil + } + } + return true, nil +} + +// WatchQueryIndexOptions is the set of options available to the query indexes Watch operation. +type WatchQueryIndexOptions struct { + WatchPrimary bool + + RetryStrategy RetryStrategy +} + +// WatchIndexes waits for a set of indexes to come online. 
+func (qm *QueryIndexManager) WatchIndexes(bucketName string, watchList []string, timeout time.Duration, opts *WatchQueryIndexOptions) error { + if opts == nil { + opts = &WatchQueryIndexOptions{} + } + + span := qm.tracer.StartSpan("WatchIndexes", nil). + SetTag("couchbase.service", "query") + defer span.Finish() + + if opts.WatchPrimary { + watchList = append(watchList, "#primary") + } + + deadline := time.Now().Add(timeout) + + curInterval := 50 * time.Millisecond + for { + if deadline.Before(time.Now()) { + return ErrUnambiguousTimeout + } + + indexes, err := qm.getAllIndexes( + span.Context(), + bucketName, + &GetAllQueryIndexesOptions{ + Timeout: time.Until(deadline), + RetryStrategy: opts.RetryStrategy, + }) + if err != nil { + return err + } + + allOnline, err := checkIndexesActive(indexes, watchList) + if err != nil { + return err + } + + if allOnline { + break + } + + curInterval += 500 * time.Millisecond + if curInterval > 1000 { + curInterval = 1000 + } + + // Make sure we don't sleep past our overall deadline, if we adjust the + // deadline then it will be caught at the top of this loop as a timeout. 
+ sleepDeadline := time.Now().Add(curInterval) + if sleepDeadline.After(deadline) { + sleepDeadline = deadline + } + + // wait till our next poll interval + time.Sleep(time.Until(sleepDeadline)) + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_searchindexes.go b/vendor/github.com/couchbase/gocb/v2/cluster_searchindexes.go new file mode 100644 index 000000000000..6817d9110d8c --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_searchindexes.go @@ -0,0 +1,670 @@ +package gocb + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/pkg/errors" +) + +type jsonSearchIndexResp struct { + Status string `json:"status"` + IndexDef *jsonSearchIndex `json:"indexDef"` +} + +type jsonSearchIndexDefs struct { + IndexDefs map[string]jsonSearchIndex `json:"indexDefs"` + ImplVersion string `json:"implVersion"` +} + +type jsonSearchIndexesResp struct { + Status string `json:"status"` + IndexDefs jsonSearchIndexDefs `json:"indexDefs"` +} + +type jsonSearchIndex struct { + UUID string `json:"uuid"` + Name string `json:"name"` + SourceName string `json:"sourceName"` + Type string `json:"type"` + Params map[string]interface{} `json:"params"` + SourceUUID string `json:"sourceUUID"` + SourceParams map[string]interface{} `json:"sourceParams"` + SourceType string `json:"sourceType"` + PlanParams map[string]interface{} `json:"planParams"` +} + +// SearchIndex is used to define a search index. +type SearchIndex struct { + // UUID is required for updates. It provides a means of ensuring consistency, the UUID must match the UUID value + // for the index on the server. + UUID string + // Name represents the name of this index. + Name string + // SourceName is the name of the source of the data for the index e.g. bucket name. + SourceName string + // Type is the type of index, e.g. fulltext-index or fulltext-alias. + Type string + // IndexParams are index properties such as store type and mappings. 
+ Params map[string]interface{} + // SourceUUID is the UUID of the data source, this can be used to more tightly tie the index to a source. + SourceUUID string + // SourceParams are extra parameters to be defined. These are usually things like advanced connection and tuning + // parameters. + SourceParams map[string]interface{} + // SourceType is the type of the data source, e.g. couchbase or nil depending on the Type field. + SourceType string + // PlanParams are plan properties such as number of replicas and number of partitions. + PlanParams map[string]interface{} +} + +func (si *SearchIndex) fromData(data jsonSearchIndex) error { + si.UUID = data.UUID + si.Name = data.Name + si.SourceName = data.SourceName + si.Type = data.Type + si.Params = data.Params + si.SourceUUID = data.SourceUUID + si.SourceParams = data.SourceParams + si.SourceType = data.SourceType + si.PlanParams = data.PlanParams + + return nil +} + +func (si *SearchIndex) toData() (jsonSearchIndex, error) { + var data jsonSearchIndex + + data.UUID = si.UUID + data.Name = si.Name + data.SourceName = si.SourceName + data.Type = si.Type + data.Params = si.Params + data.SourceUUID = si.SourceUUID + data.SourceParams = si.SourceParams + data.SourceType = si.SourceType + data.PlanParams = si.PlanParams + + return data, nil +} + +// SearchIndexManager provides methods for performing Couchbase search index management. 
+type SearchIndexManager struct { + mgmtProvider mgmtProvider + + tracer requestTracer +} + +func (sm *SearchIndexManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + logDebugf("Failed to read search index response body: %s", err) + return nil + } + + var bodyErr error + if strings.Contains(strings.ToLower(string(b)), "index not found") { + bodyErr = ErrIndexNotFound + } else if strings.Contains(strings.ToLower(string(b)), "index with the same name already exists") { + bodyErr = ErrIndexExists + } else { + bodyErr = errors.New(string(b)) + } + + return makeGenericMgmtError(bodyErr, req, resp) +} + +func (sm *SearchIndexManager) doMgmtRequest(req mgmtRequest) (*mgmtResponse, error) { + resp, err := sm.mgmtProvider.executeMgmtRequest(req) + if err != nil { + return nil, err + } + + return resp, nil +} + +// GetAllSearchIndexOptions is the set of options available to the search indexes GetAllIndexes operation. +type GetAllSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllIndexes retrieves all of the search indexes for the cluster. +func (sm *SearchIndexManager) GetAllIndexes(opts *GetAllSearchIndexOptions) ([]SearchIndex, error) { + if opts == nil { + opts = &GetAllSearchIndexOptions{} + } + + span := sm.tracer.StartSpan("GetAllIndexes", nil). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeSearch, + Method: "GET", + Path: "/api/index", + IsIdempotent: true, + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + resp, err := sm.doMgmtRequest(req) + if err != nil { + return nil, err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + idxErr := sm.tryParseErrorMessage(&req, resp) + if idxErr != nil { + return nil, idxErr + } + + return nil, makeMgmtBadStatusError("failed to get index", &req, resp) + } + + var indexesResp jsonSearchIndexesResp + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&indexesResp) + if err != nil { + return nil, err + } + + indexDefs := indexesResp.IndexDefs.IndexDefs + var indexes []SearchIndex + for _, indexData := range indexDefs { + var index SearchIndex + err := index.fromData(indexData) + if err != nil { + return nil, err + } + + indexes = append(indexes, index) + } + + return indexes, nil +} + +// GetSearchIndexOptions is the set of options available to the search indexes GetIndex operation. +type GetSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetIndex retrieves a specific search index by name. +func (sm *SearchIndexManager) GetIndex(indexName string, opts *GetSearchIndexOptions) (*SearchIndex, error) { + if opts == nil { + opts = &GetSearchIndexOptions{} + } + + span := sm.tracer.StartSpan("GetIndex", nil). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeSearch, + Method: "GET", + Path: fmt.Sprintf("/api/index/%s", indexName), + IsIdempotent: true, + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + resp, err := sm.doMgmtRequest(req) + if err != nil { + return nil, err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + idxErr := sm.tryParseErrorMessage(&req, resp) + if idxErr != nil { + return nil, idxErr + } + + return nil, makeMgmtBadStatusError("failed to get index", &req, resp) + } + + var indexResp jsonSearchIndexResp + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&indexResp) + if err != nil { + return nil, err + } + + var indexDef SearchIndex + err = indexDef.fromData(*indexResp.IndexDef) + if err != nil { + return nil, err + } + + return &indexDef, nil +} + +// UpsertSearchIndexOptions is the set of options available to the search index manager UpsertIndex operation. +type UpsertSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// UpsertIndex creates or updates a search index. +func (sm *SearchIndexManager) UpsertIndex(indexDefinition SearchIndex, opts *UpsertSearchIndexOptions) error { + if opts == nil { + opts = &UpsertSearchIndexOptions{} + } + + if indexDefinition.Name == "" { + return invalidArgumentsError{"index name cannot be empty"} + } + if indexDefinition.Type == "" { + return invalidArgumentsError{"index type cannot be empty"} + } + + span := sm.tracer.StartSpan("UpsertIndex", nil). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + indexData, err := indexDefinition.toData() + if err != nil { + return err + } + + b, err := json.Marshal(indexData) + if err != nil { + return err + } + + req := mgmtRequest{ + Service: ServiceTypeSearch, + Method: "PUT", + Path: fmt.Sprintf("/api/index/%s", indexDefinition.Name), + Headers: map[string]string{ + "cache-control": "no-cache", + }, + Body: b, + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + resp, err := sm.doMgmtRequest(req) + if err != nil { + return err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + idxErr := sm.tryParseErrorMessage(&req, resp) + if idxErr != nil { + return idxErr + } + + return makeMgmtBadStatusError("failed to create index", &req, resp) + } + + return nil +} + +// DropSearchIndexOptions is the set of options available to the search index DropIndex operation. +type DropSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropIndex removes the search index with the specific name. +func (sm *SearchIndexManager) DropIndex(indexName string, opts *DropSearchIndexOptions) error { + if opts == nil { + opts = &DropSearchIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("DropIndex", nil). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeSearch, + Method: "DELETE", + Path: fmt.Sprintf("/api/index/%s", indexName), + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + resp, err := sm.doMgmtRequest(req) + if err != nil { + return err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + return makeMgmtBadStatusError("failed to drop the index", &req, resp) + } + + return nil +} + +// AnalyzeDocumentOptions is the set of options available to the search index AnalyzeDocument operation. +type AnalyzeDocumentOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// AnalyzeDocument returns how a doc is analyzed against a specific index. +func (sm *SearchIndexManager) AnalyzeDocument(indexName string, doc interface{}, opts *AnalyzeDocumentOptions) ([]interface{}, error) { + if opts == nil { + opts = &AnalyzeDocumentOptions{} + } + + if indexName == "" { + return nil, invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("AnalyzeDocument", nil). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + b, err := json.Marshal(doc) + if err != nil { + return nil, err + } + + req := mgmtRequest{ + Service: ServiceTypeSearch, + Method: "POST", + Path: fmt.Sprintf("/api/index/%s/analyzeDoc", indexName), + Body: b, + IsIdempotent: true, + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + resp, err := sm.doMgmtRequest(req) + if err != nil { + return nil, err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + idxErr := sm.tryParseErrorMessage(&req, resp) + if idxErr != nil { + return nil, idxErr + } + + return nil, makeMgmtBadStatusError("failed to analyze document", &req, resp) + } + + var analysis struct { + Status string `json:"status"` + Analyzed []interface{} `json:"analyzed"` + } + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&analysis) + if err != nil { + return nil, err + } + + return analysis.Analyzed, nil +} + +// GetIndexedDocumentsCountOptions is the set of options available to the search index GetIndexedDocumentsCount operation. +type GetIndexedDocumentsCountOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetIndexedDocumentsCount retrieves the document count for a search index. +func (sm *SearchIndexManager) GetIndexedDocumentsCount(indexName string, opts *GetIndexedDocumentsCountOptions) (uint64, error) { + if opts == nil { + opts = &GetIndexedDocumentsCountOptions{} + } + + if indexName == "" { + return 0, invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("GetIndexedDocumentsCount", nil). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeSearch, + Method: "GET", + Path: fmt.Sprintf("/api/index/%s/count", indexName), + IsIdempotent: true, + RetryStrategy: opts.RetryStrategy, + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + resp, err := sm.doMgmtRequest(req) + if err != nil { + return 0, err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + idxErr := sm.tryParseErrorMessage(&req, resp) + if idxErr != nil { + return 0, idxErr + } + + return 0, makeMgmtBadStatusError("failed to get the indexed documents count", &req, resp) + } + + var count struct { + Count uint64 `json:"count"` + } + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&count) + if err != nil { + return 0, err + } + + return count.Count, nil +} + +func (sm *SearchIndexManager) performControlRequest( + tracectx requestSpanContext, + method, uri string, + timeout time.Duration, + retryStrategy RetryStrategy, +) error { + req := mgmtRequest{ + Service: ServiceTypeSearch, + Method: method, + Path: uri, + IsIdempotent: true, + Timeout: timeout, + RetryStrategy: retryStrategy, + parentSpan: tracectx, + } + + resp, err := sm.doMgmtRequest(req) + if err != nil { + return err + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode != 200 { + idxErr := sm.tryParseErrorMessage(&req, resp) + if idxErr != nil { + return idxErr + } + + return makeMgmtBadStatusError("failed to perform the control request", &req, resp) + } + + return nil +} + +// PauseIngestSearchIndexOptions is the set of options available to the search index PauseIngest operation. +type PauseIngestSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// PauseIngest pauses updates and maintenance for an index. 
+func (sm *SearchIndexManager) PauseIngest(indexName string, opts *PauseIngestSearchIndexOptions) error { + if opts == nil { + opts = &PauseIngestSearchIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("PauseIngest", nil). + SetTag("couchbase.service", "search") + defer span.Finish() + + return sm.performControlRequest( + span.Context(), + "POST", + fmt.Sprintf("/api/index/%s/ingestControl/pause", indexName), + opts.Timeout, + opts.RetryStrategy) +} + +// ResumeIngestSearchIndexOptions is the set of options available to the search index ResumeIngest operation. +type ResumeIngestSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// ResumeIngest resumes updates and maintenance for an index. +func (sm *SearchIndexManager) ResumeIngest(indexName string, opts *ResumeIngestSearchIndexOptions) error { + if opts == nil { + opts = &ResumeIngestSearchIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("ResumeIngest", nil). + SetTag("couchbase.service", "search") + defer span.Finish() + + return sm.performControlRequest( + span.Context(), + "POST", + fmt.Sprintf("/api/index/%s/ingestControl/resume", indexName), + opts.Timeout, + opts.RetryStrategy) +} + +// AllowQueryingSearchIndexOptions is the set of options available to the search index AllowQuerying operation. +type AllowQueryingSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// AllowQuerying allows querying against an index. +func (sm *SearchIndexManager) AllowQuerying(indexName string, opts *AllowQueryingSearchIndexOptions) error { + if opts == nil { + opts = &AllowQueryingSearchIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("AllowQuerying", nil). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + return sm.performControlRequest( + span.Context(), + "POST", + fmt.Sprintf("/api/index/%s/queryControl/allow", indexName), + opts.Timeout, + opts.RetryStrategy) +} + +// DisallowQueryingSearchIndexOptions is the set of options available to the search index DisallowQuerying operation. +type DisallowQueryingSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DisallowQuerying disallows querying against an index. +func (sm *SearchIndexManager) DisallowQuerying(indexName string, opts *AllowQueryingSearchIndexOptions) error { + if opts == nil { + opts = &AllowQueryingSearchIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("DisallowQuerying", nil). + SetTag("couchbase.service", "search") + defer span.Finish() + + return sm.performControlRequest( + span.Context(), + "POST", + fmt.Sprintf("/api/index/%s/queryControl/disallow", indexName), + opts.Timeout, + opts.RetryStrategy) +} + +// FreezePlanSearchIndexOptions is the set of options available to the search index FreezePlan operation. +type FreezePlanSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// FreezePlan freezes the assignment of index partitions to nodes. +func (sm *SearchIndexManager) FreezePlan(indexName string, opts *AllowQueryingSearchIndexOptions) error { + if opts == nil { + opts = &AllowQueryingSearchIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("FreezePlan", nil). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + return sm.performControlRequest( + span.Context(), + "POST", + fmt.Sprintf("/api/index/%s/planFreezeControl/freeze", indexName), + opts.Timeout, + opts.RetryStrategy) +} + +// UnfreezePlanSearchIndexOptions is the set of options available to the search index UnfreezePlan operation. +type UnfreezePlanSearchIndexOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// UnfreezePlan unfreezes the assignment of index partitions to nodes. +func (sm *SearchIndexManager) UnfreezePlan(indexName string, opts *AllowQueryingSearchIndexOptions) error { + if opts == nil { + opts = &AllowQueryingSearchIndexOptions{} + } + + if indexName == "" { + return invalidArgumentsError{"indexName cannot be empty"} + } + + span := sm.tracer.StartSpan("UnfreezePlan", nil). + SetTag("couchbase.service", "search") + defer span.Finish() + + return sm.performControlRequest( + span.Context(), + "POST", + fmt.Sprintf("/api/index/%s/planFreezeControl/unfreeze", indexName), + opts.Timeout, + opts.RetryStrategy) +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_searchquery.go b/vendor/github.com/couchbase/gocb/v2/cluster_searchquery.go new file mode 100644 index 000000000000..70df2a0dfc3d --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_searchquery.go @@ -0,0 +1,342 @@ +package gocb + +import ( + "encoding/json" + "time" + + cbsearch "github.com/couchbase/gocb/v2/search" + gocbcore "github.com/couchbase/gocbcore/v9" +) + +type jsonRowLocation struct { + Field string `json:"field"` + Term string `json:"term"` + Position uint32 `json:"position"` + Start uint32 `json:"start"` + End uint32 `json:"end"` + ArrayPositions []uint32 `json:"array_positions"` +} + +type jsonSearchFacet struct { + Name string `json:"name"` + Field string `json:"field"` + Total uint64 `json:"total"` + Missing uint64 `json:"missing"` + Other uint64 `json:"other"` +} + +type jsonSearchRowLocations 
map[string]map[string][]jsonRowLocation + +type jsonSearchRow struct { + Index string `json:"index"` + ID string `json:"id"` + Score float64 `json:"score"` + Explanation interface{} `json:"explanation"` + Locations jsonSearchRowLocations `json:"locations"` + Fragments map[string][]string `json:"fragments"` + Fields json.RawMessage `json:"fields"` +} + +type jsonSearchResponse struct { + Errors map[string]string `json:"errors"` + TotalHits uint64 `json:"total_hits"` + MaxScore float64 `json:"max_score"` + Took uint64 `json:"took"` + Facets map[string]jsonSearchFacet `json:"facets"` +} + +// SearchMetrics encapsulates various metrics gathered during a search queries execution. +type SearchMetrics struct { + Took time.Duration + TotalRows uint64 + MaxScore float64 + TotalPartitionCount uint64 + SuccessPartitionCount uint64 + ErrorPartitionCount uint64 +} + +func (metrics *SearchMetrics) fromData(data jsonSearchResponse) error { + metrics.TotalRows = data.TotalHits + metrics.MaxScore = data.MaxScore + metrics.Took = time.Duration(data.Took) * time.Microsecond + + return nil +} + +// SearchMetaData provides access to the meta-data properties of a search query result. +type SearchMetaData struct { + Metrics SearchMetrics + Errors map[string]string +} + +func (meta *SearchMetaData) fromData(data jsonSearchResponse) error { + metrics := SearchMetrics{} + if err := metrics.fromData(data); err != nil { + return err + } + + meta.Metrics = metrics + meta.Errors = data.Errors + + return nil +} + +// SearchFacetResult provides access to the result of a faceted query. 
+type SearchFacetResult struct { + Name string + Field string + Total uint64 + Missing uint64 + Other uint64 +} + +func (fr *SearchFacetResult) fromData(data jsonSearchFacet) error { + fr.Name = data.Name + fr.Field = data.Field + fr.Total = data.Total + fr.Missing = data.Missing + fr.Other = data.Other + + return nil +} + +// SearchRowLocation represents the location of a row match +type SearchRowLocation struct { + Position uint32 + Start uint32 + End uint32 + ArrayPositions []uint32 +} + +func (rl *SearchRowLocation) fromData(data jsonRowLocation) error { + rl.Position = data.Position + rl.Start = data.Start + rl.End = data.End + rl.ArrayPositions = data.ArrayPositions + + return nil +} + +// SearchRow represents a single hit returned from a search query. +type SearchRow struct { + Index string + ID string + Score float64 + Explanation interface{} + Locations map[string]map[string][]SearchRowLocation + Fragments map[string][]string + fieldsBytes []byte +} + +// Fields decodes the fields included in a search hit. +func (sr *SearchRow) Fields(valuePtr interface{}) error { + return json.Unmarshal(sr.fieldsBytes, valuePtr) +} + +type searchRowReader interface { + NextRow() []byte + Err() error + MetaData() ([]byte, error) + Close() error +} + +// SearchResult allows access to the results of a search query. +type SearchResult struct { + reader searchRowReader + + currentRow SearchRow +} + +func newSearchResult(reader searchRowReader) *SearchResult { + return &SearchResult{ + reader: reader, + } +} + +// Next assigns the next result from the results into the value pointer, returning whether the read was successful. 
+func (r *SearchResult) Next() bool { + rowBytes := r.reader.NextRow() + if rowBytes == nil { + return false + } + + r.currentRow = SearchRow{} + + var rowData jsonSearchRow + if err := json.Unmarshal(rowBytes, &rowData); err == nil { + r.currentRow.Index = rowData.Index + r.currentRow.ID = rowData.ID + r.currentRow.Score = rowData.Score + r.currentRow.Explanation = rowData.Explanation + r.currentRow.Fragments = rowData.Fragments + r.currentRow.fieldsBytes = rowData.Fields + + locations := make(map[string]map[string][]SearchRowLocation) + for fieldName, fieldData := range rowData.Locations { + terms := make(map[string][]SearchRowLocation) + for termName, termData := range fieldData { + locations := make([]SearchRowLocation, len(termData)) + for locIdx, locData := range termData { + err := locations[locIdx].fromData(locData) + if err != nil { + logWarnf("failed to parse search query location data: %s", err) + } + } + terms[termName] = locations + } + locations[fieldName] = terms + } + r.currentRow.Locations = locations + } + + return true +} + +// Row returns the contents of the current row. +func (r *SearchResult) Row() SearchRow { + return r.currentRow +} + +// Err returns any errors that have occurred on the stream +func (r *SearchResult) Err() error { + return r.reader.Err() +} + +// Close marks the results as closed, returning any errors that occurred during reading the results. +func (r *SearchResult) Close() error { + return r.reader.Close() +} + +func (r *SearchResult) getJSONResp() (jsonSearchResponse, error) { + metaDataBytes, err := r.reader.MetaData() + if err != nil { + return jsonSearchResponse{}, err + } + + var jsonResp jsonSearchResponse + err = json.Unmarshal(metaDataBytes, &jsonResp) + if err != nil { + return jsonSearchResponse{}, err + } + + return jsonResp, nil +} + +// MetaData returns any meta-data that was available from this query. 
Note that +// the meta-data will only be available once the object has been closed (either +// implicitly or explicitly). +func (r *SearchResult) MetaData() (*SearchMetaData, error) { + jsonResp, err := r.getJSONResp() + if err != nil { + return nil, err + } + + var metaData SearchMetaData + err = metaData.fromData(jsonResp) + if err != nil { + return nil, err + } + + return &metaData, nil +} + +// Facets returns any facets that were returned with this query. Note that the +// facets will only be available once the object has been closed (either +// implicitly or explicitly). +func (r *SearchResult) Facets() (map[string]SearchFacetResult, error) { + jsonResp, err := r.getJSONResp() + if err != nil { + return nil, err + } + + facets := make(map[string]SearchFacetResult) + for facetName, facetData := range jsonResp.Facets { + var facet SearchFacetResult + err := facet.fromData(facetData) + if err != nil { + return nil, err + } + + facets[facetName] = facet + } + + return facets, nil +} + +// SearchQuery executes the analytics query statement on the server. +func (c *Cluster) SearchQuery(indexName string, query cbsearch.Query, opts *SearchOptions) (*SearchResult, error) { + if opts == nil { + opts = &SearchOptions{} + } + + span := c.tracer.StartSpan("SearchQuery", opts.parentSpan). 
+ SetTag("couchbase.service", "search") + defer span.Finish() + + timeout := opts.Timeout + if timeout == 0 { + timeout = c.timeoutsConfig.SearchTimeout + } + deadline := time.Now().Add(timeout) + + retryStrategy := c.retryStrategyWrapper + if opts.RetryStrategy != nil { + retryStrategy = newRetryStrategyWrapper(opts.RetryStrategy) + } + + searchOpts, err := opts.toMap() + if err != nil { + return nil, SearchError{ + InnerError: wrapError(err, "failed to generate query options"), + Query: query, + } + } + + searchOpts["query"] = query + + return c.execSearchQuery(span, indexName, searchOpts, deadline, retryStrategy) +} + +func maybeGetSearchOptionQuery(options map[string]interface{}) interface{} { + if value, ok := options["query"]; ok { + return value + } + return "" +} + +func (c *Cluster) execSearchQuery( + span requestSpan, + indexName string, + options map[string]interface{}, + deadline time.Time, + retryStrategy *retryStrategyWrapper, +) (*SearchResult, error) { + provider, err := c.getSearchProvider() + if err != nil { + return nil, SearchError{ + InnerError: wrapError(err, "failed to get query provider"), + Query: maybeGetSearchOptionQuery(options), + } + } + + reqBytes, err := json.Marshal(options) + if err != nil { + return nil, SearchError{ + InnerError: wrapError(err, "failed to marshall query body"), + Query: maybeGetSearchOptionQuery(options), + } + } + + res, err := provider.SearchQuery(gocbcore.SearchQueryOptions{ + IndexName: indexName, + Payload: reqBytes, + RetryStrategy: retryStrategy, + Deadline: deadline, + TraceContext: span.Context(), + }) + if err != nil { + return nil, maybeEnhanceSearchError(err) + } + + return newSearchResult(res), nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/cluster_usermgr.go b/vendor/github.com/couchbase/gocb/v2/cluster_usermgr.go new file mode 100644 index 000000000000..b52cd17fe254 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/cluster_usermgr.go @@ -0,0 +1,792 @@ +package gocb + +import ( + 
"encoding/json" + "fmt" + "io/ioutil" + "net/url" + "strings" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" +) + +// AuthDomain specifies the user domain of a specific user +type AuthDomain string + +const ( + // LocalDomain specifies users that are locally stored in Couchbase. + LocalDomain AuthDomain = "local" + + // ExternalDomain specifies users that are externally stored + // (in LDAP for instance). + ExternalDomain AuthDomain = "external" +) + +type jsonOrigin struct { + Type string `json:"type"` + Name string `json:"name"` +} + +type jsonRole struct { + RoleName string `json:"role"` + BucketName string `json:"bucket_name"` +} + +type jsonRoleDescription struct { + jsonRole + + Name string `json:"name"` + Description string `json:"desc"` +} + +type jsonRoleOrigins struct { + jsonRole + + Origins []jsonOrigin +} + +type jsonUserMetadata struct { + ID string `json:"id"` + Name string `json:"name"` + Roles []jsonRoleOrigins `json:"roles"` + Groups []string `json:"groups"` + Domain AuthDomain `json:"domain"` + ExternalGroups []string `json:"external_groups"` + PasswordChanged time.Time `json:"password_change_date"` +} + +type jsonGroup struct { + Name string `json:"id"` + Description string `json:"description"` + Roles []jsonRole `json:"roles"` + LDAPGroupReference string `json:"ldap_group_ref"` +} + +// Role represents a specific permission. +type Role struct { + Name string `json:"role"` + Bucket string `json:"bucket_name"` +} + +func (ro *Role) fromData(data jsonRole) error { + ro.Name = data.RoleName + ro.Bucket = data.BucketName + + return nil +} + +// RoleAndDescription represents a role with its display name and description. 
+type RoleAndDescription struct { + Role + + DisplayName string + Description string +} + +func (rd *RoleAndDescription) fromData(data jsonRoleDescription) error { + err := rd.Role.fromData(data.jsonRole) + if err != nil { + return err + } + + rd.DisplayName = data.Name + rd.Description = data.Description + + return nil +} + +// Origin indicates why a user has a specific role. Is the Origin Type is "user" then the role is assigned +// directly to the user. If the type is "group" then it means that the role has been inherited from the group +// identified by the Name field. +type Origin struct { + Type string + Name string +} + +func (o *Origin) fromData(data jsonOrigin) error { + o.Type = data.Type + o.Name = data.Name + + return nil +} + +// RoleAndOrigins associates a role with its origins. +type RoleAndOrigins struct { + Role + + Origins []Origin +} + +func (ro *RoleAndOrigins) fromData(data jsonRoleOrigins) error { + err := ro.Role.fromData(data.jsonRole) + if err != nil { + return err + } + + origins := make([]Origin, len(data.Origins)) + for _, originData := range data.Origins { + var origin Origin + err := origin.fromData(originData) + if err != nil { + return err + } + + origins = append(origins, origin) + } + ro.Origins = origins + + return nil +} + +// User represents a user which was retrieved from the server. +type User struct { + Username string + DisplayName string + // Roles are the roles assigned to the user that are of type "user". + Roles []Role + Groups []string + Password string +} + +// UserAndMetadata represents a user and user meta-data from the server. +type UserAndMetadata struct { + User + + Domain AuthDomain + // EffectiveRoles are all of the user's roles and the origins. 
+ EffectiveRoles []RoleAndOrigins + ExternalGroups []string + PasswordChanged time.Time +} + +func (um *UserAndMetadata) fromData(data jsonUserMetadata) error { + um.User.Username = data.ID + um.User.DisplayName = data.Name + um.User.Groups = data.Groups + + um.ExternalGroups = data.ExternalGroups + um.Domain = data.Domain + um.PasswordChanged = data.PasswordChanged + + var roles []Role + var effectiveRoles []RoleAndOrigins + for _, roleData := range data.Roles { + var effectiveRole RoleAndOrigins + err := effectiveRole.fromData(roleData) + if err != nil { + return err + } + + effectiveRoles = append(effectiveRoles, effectiveRole) + + role := effectiveRole.Role + if roleData.Origins == nil { + roles = append(roles, role) + } else { + for _, origin := range effectiveRole.Origins { + if origin.Type == "user" { + roles = append(roles, role) + break + } + } + } + } + um.EffectiveRoles = effectiveRoles + um.User.Roles = roles + + return nil +} + +// Group represents a user group on the server. +type Group struct { + Name string + Description string + Roles []Role + LDAPGroupReference string +} + +func (g *Group) fromData(data jsonGroup) error { + g.Name = data.Name + g.Description = data.Description + g.LDAPGroupReference = data.LDAPGroupReference + + roles := make([]Role, len(data.Roles)) + for roleIdx, roleData := range data.Roles { + err := roles[roleIdx].fromData(roleData) + if err != nil { + return err + } + } + g.Roles = roles + + return nil +} + +// UserManager provides methods for performing Couchbase user management. 
+type UserManager struct {
+	provider mgmtProvider
+	tracer   requestTracer
+}
+
+func (um *UserManager) tryParseErrorMessage(req *mgmtRequest, resp *mgmtResponse) error {
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logDebugf("Failed to read user manager response body: %s", err)
+		return nil
+	}
+
+	var bodyErr error
+	if resp.StatusCode == 404 {
+		if strings.Contains(strings.ToLower(string(b)), "unknown user") {
+			bodyErr = ErrUserNotFound
+		} else if strings.Contains(strings.ToLower(string(b)), "user was not found") {
+			bodyErr = ErrUserNotFound
+		} else if strings.Contains(strings.ToLower(string(b)), "group was not found") {
+			bodyErr = ErrGroupNotFound
+		} else if strings.Contains(strings.ToLower(string(b)), "unknown group") {
+			bodyErr = ErrGroupNotFound
+		} else {
+			bodyErr = errors.New(string(b))
+		}
+	} else {
+		bodyErr = errors.New(string(b))
+	}
+
+	return makeGenericMgmtError(bodyErr, req, resp)
+}
+
+// GetAllUsersOptions is the set of options available to the user manager GetAll operation.
+type GetAllUsersOptions struct {
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+
+	DomainName string
+}
+
+// GetAllUsers returns a list of all the users from the cluster.
+func (um *UserManager) GetAllUsers(opts *GetAllUsersOptions) ([]UserAndMetadata, error) {
+	if opts == nil {
+		opts = &GetAllUsersOptions{}
+	}
+
+	span := um.tracer.StartSpan("GetAllUsers", nil).
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + if opts.DomainName == "" { + opts.DomainName = string(LocalDomain) + } + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "GET", + Path: fmt.Sprintf("/settings/rbac/users/%s", opts.DomainName), + IsIdempotent: true, + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return nil, makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return nil, usrErr + } + return nil, makeMgmtBadStatusError("failed to get users", &req, resp) + } + + var usersData []jsonUserMetadata + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&usersData) + if err != nil { + return nil, err + } + + users := make([]UserAndMetadata, len(usersData)) + for userIdx, userData := range usersData { + err := users[userIdx].fromData(userData) + if err != nil { + return nil, err + } + } + + return users, nil +} + +// GetUserOptions is the set of options available to the user manager Get operation. +type GetUserOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy + + DomainName string +} + +// GetUser returns the data for a particular user +func (um *UserManager) GetUser(name string, opts *GetUserOptions) (*UserAndMetadata, error) { + if opts == nil { + opts = &GetUserOptions{} + } + + span := um.tracer.StartSpan("GetUser", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + if opts.DomainName == "" { + opts.DomainName = string(LocalDomain) + } + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "GET", + Path: fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, name), + IsIdempotent: true, + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return nil, makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return nil, usrErr + } + return nil, makeMgmtBadStatusError("failed to get user", &req, resp) + } + + var userData jsonUserMetadata + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&userData) + if err != nil { + return nil, err + } + + var user UserAndMetadata + err = user.fromData(userData) + if err != nil { + return nil, err + } + + return &user, nil +} + +// UpsertUserOptions is the set of options available to the user manager Upsert operation. +type UpsertUserOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy + + DomainName string +} + +// UpsertUser updates a built-in RBAC user on the cluster. +func (um *UserManager) UpsertUser(user User, opts *UpsertUserOptions) error { + if opts == nil { + opts = &UpsertUserOptions{} + } + + span := um.tracer.StartSpan("UpsertUser", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + if opts.DomainName == "" { + opts.DomainName = string(LocalDomain) + } + + var reqRoleStrs []string + for _, roleData := range user.Roles { + if roleData.Bucket == "" { + reqRoleStrs = append(reqRoleStrs, roleData.Name) + } else { + reqRoleStrs = append(reqRoleStrs, fmt.Sprintf("%s[%s]", roleData.Name, roleData.Bucket)) + } + } + + reqForm := make(url.Values) + reqForm.Add("name", user.DisplayName) + if user.Password != "" { + reqForm.Add("password", user.Password) + } + if len(user.Groups) > 0 { + reqForm.Add("groups", strings.Join(user.Groups, ",")) + } + reqForm.Add("roles", strings.Join(reqRoleStrs, ",")) + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "PUT", + Path: fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, user.Username), + Body: []byte(reqForm.Encode()), + ContentType: "application/x-www-form-urlencoded", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return usrErr + } + return makeMgmtBadStatusError("failed to upsert user", &req, resp) + } + + return nil +} + +// DropUserOptions is the set of options available to the user manager Drop operation. +type DropUserOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy + + DomainName string +} + +// DropUser removes a built-in RBAC user on the cluster. +func (um *UserManager) DropUser(name string, opts *DropUserOptions) error { + if opts == nil { + opts = &DropUserOptions{} + } + + span := um.tracer.StartSpan("DropUser", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + if opts.DomainName == "" { + opts.DomainName = string(LocalDomain) + } + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "DELETE", + Path: fmt.Sprintf("/settings/rbac/users/%s/%s", opts.DomainName, name), + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return usrErr + } + return makeMgmtBadStatusError("failed to drop user", &req, resp) + } + + return nil +} + +// GetRolesOptions is the set of options available to the user manager GetRoles operation. +type GetRolesOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetRoles lists the roles supported by the cluster. +func (um *UserManager) GetRoles(opts *GetRolesOptions) ([]RoleAndDescription, error) { + if opts == nil { + opts = &GetRolesOptions{} + } + + span := um.tracer.StartSpan("GetRoles", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "GET", + Path: "/settings/rbac/roles", + RetryStrategy: opts.RetryStrategy, + IsIdempotent: true, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return nil, makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return nil, usrErr + } + return nil, makeMgmtBadStatusError("failed to get roles", &req, resp) + } + + var roleDatas []jsonRoleDescription + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&roleDatas) + if err != nil { + return nil, err + } + + roles := make([]RoleAndDescription, len(roleDatas)) + for roleIdx, roleData := range roleDatas { + err := roles[roleIdx].fromData(roleData) + if err != nil { + return nil, err + } + } + + return roles, nil +} + +// GetGroupOptions is the set of options available to the group manager Get operation. +type GetGroupOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetGroup fetches a single group from the server. +func (um *UserManager) GetGroup(groupName string, opts *GetGroupOptions) (*Group, error) { + if groupName == "" { + return nil, makeInvalidArgumentsError("groupName cannot be empty") + } + if opts == nil { + opts = &GetGroupOptions{} + } + + span := um.tracer.StartSpan("GetGroup", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "GET", + Path: fmt.Sprintf("/settings/rbac/groups/%s", groupName), + RetryStrategy: opts.RetryStrategy, + IsIdempotent: true, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return nil, makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return nil, usrErr + } + return nil, makeMgmtBadStatusError("failed to get group", &req, resp) + } + + var groupData jsonGroup + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&groupData) + if err != nil { + return nil, err + } + + var group Group + err = group.fromData(groupData) + if err != nil { + return nil, err + } + + return &group, nil +} + +// GetAllGroupsOptions is the set of options available to the group manager GetAll operation. +type GetAllGroupsOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllGroups fetches all groups from the server. +func (um *UserManager) GetAllGroups(opts *GetAllGroupsOptions) ([]Group, error) { + if opts == nil { + opts = &GetAllGroupsOptions{} + } + + span := um.tracer.StartSpan("GetAllGroups", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "GET", + Path: "/settings/rbac/groups", + RetryStrategy: opts.RetryStrategy, + IsIdempotent: true, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return nil, makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return nil, usrErr + } + return nil, makeMgmtBadStatusError("failed to get all groups", &req, resp) + } + + var groupDatas []jsonGroup + jsonDec := json.NewDecoder(resp.Body) + err = jsonDec.Decode(&groupDatas) + if err != nil { + return nil, err + } + + groups := make([]Group, len(groupDatas)) + for groupIdx, groupData := range groupDatas { + err = groups[groupIdx].fromData(groupData) + if err != nil { + return nil, err + } + } + + return groups, nil +} + +// UpsertGroupOptions is the set of options available to the group manager Upsert operation. +type UpsertGroupOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// UpsertGroup creates, or updates, a group on the server. +func (um *UserManager) UpsertGroup(group Group, opts *UpsertGroupOptions) error { + if group.Name == "" { + return makeInvalidArgumentsError("group name cannot be empty") + } + if opts == nil { + opts = &UpsertGroupOptions{} + } + + span := um.tracer.StartSpan("UpsertGroup", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + var reqRoleStrs []string + for _, roleData := range group.Roles { + if roleData.Bucket == "" { + reqRoleStrs = append(reqRoleStrs, roleData.Name) + } else { + reqRoleStrs = append(reqRoleStrs, fmt.Sprintf("%s[%s]", roleData.Name, roleData.Bucket)) + } + } + + reqForm := make(url.Values) + reqForm.Add("description", group.Description) + reqForm.Add("ldap_group_ref", group.LDAPGroupReference) + reqForm.Add("roles", strings.Join(reqRoleStrs, ",")) + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "PUT", + Path: fmt.Sprintf("/settings/rbac/groups/%s", group.Name), + Body: []byte(reqForm.Encode()), + ContentType: "application/x-www-form-urlencoded", + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return usrErr + } + return makeMgmtBadStatusError("failed to upsert group", &req, resp) + } + + return nil +} + +// DropGroupOptions is the set of options available to the group manager Drop operation. +type DropGroupOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// DropGroup removes a group from the server. +func (um *UserManager) DropGroup(groupName string, opts *DropGroupOptions) error { + if groupName == "" { + return makeInvalidArgumentsError("groupName cannot be empty") + } + + if opts == nil { + opts = &DropGroupOptions{} + } + + span := um.tracer.StartSpan("DropGroup", nil). 
+ SetTag("couchbase.service", "mgmt") + defer span.Finish() + + req := mgmtRequest{ + Service: ServiceTypeManagement, + Method: "DELETE", + Path: fmt.Sprintf("/settings/rbac/groups/%s", groupName), + RetryStrategy: opts.RetryStrategy, + UniqueID: uuid.New().String(), + Timeout: opts.Timeout, + parentSpan: span.Context(), + } + + resp, err := um.provider.executeMgmtRequest(req) + if err != nil { + return makeGenericMgmtError(err, &req, resp) + } + defer ensureBodyClosed(resp.Body) + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + usrErr := um.tryParseErrorMessage(&req, resp) + if usrErr != nil { + return usrErr + } + return makeMgmtBadStatusError("failed to drop group", &req, resp) + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/collection.go b/vendor/github.com/couchbase/gocb/v2/collection.go new file mode 100644 index 000000000000..946a668e05ea --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/collection.go @@ -0,0 +1,75 @@ +package gocb + +import "time" + +type kvTimeoutsConfig struct { + KVTimeout time.Duration + KVDurableTimeout time.Duration +} + +// Collection represents a single collection. 
+type Collection struct { + collectionName string + scope string + bucket *Bucket + + timeoutsConfig kvTimeoutsConfig + + transcoder Transcoder + retryStrategyWrapper *retryStrategyWrapper + tracer requestTracer + + useMutationTokens bool + + getKvProvider func() (kvProvider, error) +} + +func newCollection(scope *Scope, collectionName string) *Collection { + return &Collection{ + collectionName: collectionName, + scope: scope.Name(), + bucket: scope.bucket, + + timeoutsConfig: scope.timeoutsConfig, + + transcoder: scope.transcoder, + retryStrategyWrapper: scope.retryStrategyWrapper, + tracer: scope.tracer, + + useMutationTokens: scope.useMutationTokens, + + getKvProvider: scope.getKvProvider, + } +} + +func (c *Collection) name() string { + return c.collectionName +} + +// ScopeName returns the name of the scope to which this collection belongs. +// UNCOMMITTED: This API may change in the future. +func (c *Collection) ScopeName() string { + return c.scope +} + +// Bucket returns the name of the bucket to which this collection belongs. +// UNCOMMITTED: This API may change in the future. +func (c *Collection) Bucket() *Bucket { + return c.bucket +} + +// Name returns the name of the collection. +func (c *Collection) Name() string { + return c.collectionName +} + +func (c *Collection) startKvOpTrace(operationName string, tracectx requestSpanContext) requestSpan { + return c.tracer.StartSpan(operationName, tracectx). + SetTag("couchbase.bucket", c.bucket). + SetTag("couchbase.collection", c.collectionName). 
+ SetTag("couchbase.service", "kv") +} + +func (c *Collection) bucketName() string { + return c.bucket.Name() +} diff --git a/vendor/github.com/couchbase/gocb/v2/collection_binary_crud.go b/vendor/github.com/couchbase/gocb/v2/collection_binary_crud.go new file mode 100644 index 000000000000..4f24becc37a0 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/collection_binary_crud.go @@ -0,0 +1,312 @@ +package gocb + +import ( + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +// BinaryCollection is a set of binary operations. +type BinaryCollection struct { + collection *Collection +} + +// AppendOptions are the options available to the Append operation. +type AppendOptions struct { + Timeout time.Duration + DurabilityLevel DurabilityLevel + PersistTo uint + ReplicateTo uint + Cas Cas + RetryStrategy RetryStrategy +} + +func (c *Collection) binaryAppend(id string, val []byte, opts *AppendOptions) (mutOut *MutationResult, errOut error) { + if opts == nil { + opts = &AppendOptions{} + } + + opm := c.newKvOpManager("Append", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Append(gocbcore.AdjoinOptions{ + Key: opm.DocumentID(), + Value: val, + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + Cas: gocbcore.Cas(opts.Cas), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.AdjoinResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mutOut = &MutationResult{} + mutOut.cas = Cas(res.Cas) + mutOut.mt = 
opm.EnhanceMt(res.MutationToken) + + opm.Resolve(mutOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// Append appends a byte value to a document. +func (c *BinaryCollection) Append(id string, val []byte, opts *AppendOptions) (mutOut *MutationResult, errOut error) { + return c.collection.binaryAppend(id, val, opts) +} + +// PrependOptions are the options available to the Prepend operation. +type PrependOptions struct { + Timeout time.Duration + DurabilityLevel DurabilityLevel + PersistTo uint + ReplicateTo uint + Cas Cas + RetryStrategy RetryStrategy +} + +func (c *Collection) binaryPrepend(id string, val []byte, opts *PrependOptions) (mutOut *MutationResult, errOut error) { + if opts == nil { + opts = &PrependOptions{} + } + + opm := c.newKvOpManager("Prepend", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Prepend(gocbcore.AdjoinOptions{ + Key: opm.DocumentID(), + Value: val, + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + Cas: gocbcore.Cas(opts.Cas), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.AdjoinResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mutOut = &MutationResult{} + mutOut.cas = Cas(res.Cas) + mutOut.mt = opm.EnhanceMt(res.MutationToken) + + opm.Resolve(mutOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// Prepend prepends a byte value to a document. 
+func (c *BinaryCollection) Prepend(id string, val []byte, opts *PrependOptions) (mutOut *MutationResult, errOut error) { + return c.collection.binaryPrepend(id, val, opts) +} + +// IncrementOptions are the options available to the Increment operation. +type IncrementOptions struct { + Timeout time.Duration + // Expiry is the length of time that the document will be stored in Couchbase. + // A value of 0 will set the document to never expire. + Expiry time.Duration + // Initial, if non-negative, is the `initial` value to use for the document if it does not exist. + // If present, this is the value that will be returned by a successful operation. + Initial int64 + // Delta is the value to use for incrementing/decrementing if Initial is not present. + Delta uint64 + DurabilityLevel DurabilityLevel + PersistTo uint + ReplicateTo uint + Cas Cas + RetryStrategy RetryStrategy +} + +func (c *Collection) binaryIncrement(id string, opts *IncrementOptions) (countOut *CounterResult, errOut error) { + if opts == nil { + opts = &IncrementOptions{} + } + + opm := c.newKvOpManager("Increment", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + realInitial := uint64(0xFFFFFFFFFFFFFFFF) + if opts.Initial >= 0 { + realInitial = uint64(opts.Initial) + } + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Increment(gocbcore.CounterOptions{ + Key: opm.DocumentID(), + Delta: opts.Delta, + Initial: realInitial, + Expiry: durationToExpiry(opts.Expiry), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + Cas: gocbcore.Cas(opts.Cas), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + 
Deadline: opm.Deadline(), + }, func(res *gocbcore.CounterResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + countOut = &CounterResult{} + countOut.cas = Cas(res.Cas) + countOut.mt = opm.EnhanceMt(res.MutationToken) + countOut.content = res.Value + + opm.Resolve(countOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// Increment performs an atomic addition for an integer document. Passing a +// non-negative `initial` value will cause the document to be created if it did not +// already exist. +func (c *BinaryCollection) Increment(id string, opts *IncrementOptions) (countOut *CounterResult, errOut error) { + return c.collection.binaryIncrement(id, opts) +} + +// DecrementOptions are the options available to the Decrement operation. +type DecrementOptions struct { + Timeout time.Duration + // Expiry is the length of time that the document will be stored in Couchbase. + // A value of 0 will set the document to never expire. + Expiry time.Duration + // Initial, if non-negative, is the `initial` value to use for the document if it does not exist. + // If present, this is the value that will be returned by a successful operation. + Initial int64 + // Delta is the value to use for incrementing/decrementing if Initial is not present. 
+ Delta uint64 + DurabilityLevel DurabilityLevel + PersistTo uint + ReplicateTo uint + Cas Cas + RetryStrategy RetryStrategy +} + +func (c *Collection) binaryDecrement(id string, opts *DecrementOptions) (countOut *CounterResult, errOut error) { + if opts == nil { + opts = &DecrementOptions{} + } + + opm := c.newKvOpManager("Decrement", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + realInitial := uint64(0xFFFFFFFFFFFFFFFF) + if opts.Initial >= 0 { + realInitial = uint64(opts.Initial) + } + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Decrement(gocbcore.CounterOptions{ + Key: opm.DocumentID(), + Delta: opts.Delta, + Initial: realInitial, + Expiry: durationToExpiry(opts.Expiry), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + Cas: gocbcore.Cas(opts.Cas), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.CounterResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + countOut = &CounterResult{} + countOut.cas = Cas(res.Cas) + countOut.mt = opm.EnhanceMt(res.MutationToken) + countOut.content = res.Value + + opm.Resolve(countOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// Decrement performs an atomic subtraction for an integer document. Passing a +// non-negative `initial` value will cause the document to be created if it did not +// already exist. 
+func (c *BinaryCollection) Decrement(id string, opts *DecrementOptions) (countOut *CounterResult, errOut error) { + return c.collection.binaryDecrement(id, opts) +} diff --git a/vendor/github.com/couchbase/gocb/v2/collection_bulk.go b/vendor/github.com/couchbase/gocb/v2/collection_bulk.go new file mode 100644 index 000000000000..da421fc4ee6a --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/collection_bulk.go @@ -0,0 +1,745 @@ +package gocb + +import ( + "time" + + "github.com/couchbase/gocbcore/v9" +) + +type bulkOp struct { + pendop gocbcore.PendingOp + span requestSpan +} + +func (op *bulkOp) cancel() { + op.pendop.Cancel() +} + +func (op *bulkOp) finish() { + op.span.Finish() +} + +// BulkOp represents a single operation that can be submitted (within a list of more operations) to .Do() +// You can create a bulk operation by instantiating one of the implementations of BulkOp, +// such as GetOp, UpsertOp, ReplaceOp, and more. +// UNCOMMITTED: This API may change in the future. +type BulkOp interface { + execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) + markError(err error) + cancel() + finish() +} + +// BulkOpOptions are the set of options available when performing BulkOps using Do. +type BulkOpOptions struct { + Timeout time.Duration + Transcoder Transcoder + RetryStrategy RetryStrategy +} + +// Do execute one or more `BulkOp` items in parallel. +// UNCOMMITTED: This API may change in the future. 
+func (c *Collection) Do(ops []BulkOp, opts *BulkOpOptions) error { + if opts == nil { + opts = &BulkOpOptions{} + } + + span := c.startKvOpTrace("Do", nil) + + timeout := opts.Timeout + if opts.Timeout == 0 { + timeout = c.timeoutsConfig.KVTimeout * time.Duration(len(ops)) + } + + retryWrapper := c.retryStrategyWrapper + if opts.RetryStrategy != nil { + retryWrapper = newRetryStrategyWrapper(opts.RetryStrategy) + } + + if opts.Transcoder == nil { + opts.Transcoder = c.transcoder + } + + agent, err := c.getKvProvider() + if err != nil { + return err + } + + // Make the channel big enough to hold all our ops in case + // we get delayed inside execute (don't want to block the + // individual op handlers when they dispatch their signal). + signal := make(chan BulkOp, len(ops)) + for _, item := range ops { + item.execute(span.Context(), c, agent, opts.Transcoder, signal, retryWrapper, time.Now().Add(timeout), c.startKvOpTrace) + } + + for range ops { + item := <-signal + // We're really just clearing the pendop from this thread, + // since it already completed, no cancel actually occurs + item.finish() + } + return nil +} + +// GetOp represents a type of `BulkOp` used for Get operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type GetOp struct { + bulkOp + + ID string + Result *GetResult + Err error +} + +func (item *GetOp) markError(err error) { + item.Err = err +} + +func (item *GetOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("GetOp", tracectx) + item.bulkOp.span = span + + op, err := provider.Get(gocbcore.GetOptions{ + Key: []byte(item.ID), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.GetResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &GetResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + transcoder: transcoder, + contents: res.Value, + flags: res.Flags, + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// GetAndTouchOp represents a type of `BulkOp` used for GetAndTouch operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type GetAndTouchOp struct { + bulkOp + + ID string + Expiry time.Duration + Result *GetResult + Err error +} + +func (item *GetAndTouchOp) markError(err error) { + item.Err = err +} + +func (item *GetAndTouchOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("GetAndTouchOp", tracectx) + item.bulkOp.span = span + + op, err := provider.GetAndTouch(gocbcore.GetAndTouchOptions{ + Key: []byte(item.ID), + Expiry: durationToExpiry(item.Expiry), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.GetAndTouchResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &GetResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + transcoder: transcoder, + contents: res.Value, + flags: res.Flags, + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// TouchOp represents a type of `BulkOp` used for Touch operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type TouchOp struct { + bulkOp + + ID string + Expiry time.Duration + Result *MutationResult + Err error +} + +func (item *TouchOp) markError(err error) { + item.Err = err +} + +func (item *TouchOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("TouchOp", tracectx) + item.bulkOp.span = span + + op, err := provider.Touch(gocbcore.TouchOptions{ + Key: []byte(item.ID), + Expiry: durationToExpiry(item.Expiry), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.TouchResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// RemoveOp represents a type of `BulkOp` used for Remove operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type RemoveOp struct { + bulkOp + + ID string + Cas Cas + Result *MutationResult + Err error +} + +func (item *RemoveOp) markError(err error) { + item.Err = err +} + +func (item *RemoveOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("RemoveOp", tracectx) + item.bulkOp.span = span + + op, err := provider.Delete(gocbcore.DeleteOptions{ + Key: []byte(item.ID), + Cas: gocbcore.Cas(item.Cas), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.DeleteResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// UpsertOp represents a type of `BulkOp` used for Upsert operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type UpsertOp struct { + bulkOp + + ID string + Value interface{} + Expiry time.Duration + Cas Cas + Result *MutationResult + Err error +} + +func (item *UpsertOp) markError(err error) { + item.Err = err +} + +func (item *UpsertOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, + signal chan BulkOp, retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("UpsertOp", tracectx) + item.bulkOp.span = span + + etrace := c.startKvOpTrace("encode", span.Context()) + bytes, flags, err := transcoder.Encode(item.Value) + etrace.Finish() + if err != nil { + item.Err = err + signal <- item + return + } + + op, err := provider.Set(gocbcore.SetOptions{ + Key: []byte(item.ID), + Value: bytes, + Flags: flags, + Expiry: durationToExpiry(item.Expiry), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.StoreResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + + if item.Err == nil { + item.Result = &MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// InsertOp represents a type of `BulkOp` used for Insert operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type InsertOp struct { + bulkOp + + ID string + Value interface{} + Expiry time.Duration + Result *MutationResult + Err error +} + +func (item *InsertOp) markError(err error) { + item.Err = err +} + +func (item *InsertOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("InsertOp", tracectx) + item.bulkOp.span = span + + etrace := c.startKvOpTrace("encode", span.Context()) + bytes, flags, err := transcoder.Encode(item.Value) + if err != nil { + etrace.Finish() + item.Err = err + signal <- item + return + } + etrace.Finish() + + op, err := provider.Add(gocbcore.AddOptions{ + Key: []byte(item.ID), + Value: bytes, + Flags: flags, + Expiry: durationToExpiry(item.Expiry), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.StoreResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// ReplaceOp represents a type of `BulkOp` used for Replace operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type ReplaceOp struct { + bulkOp + + ID string + Value interface{} + Expiry time.Duration + Cas Cas + Result *MutationResult + Err error +} + +func (item *ReplaceOp) markError(err error) { + item.Err = err +} + +func (item *ReplaceOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("ReplaceOp", tracectx) + item.bulkOp.span = span + + etrace := c.startKvOpTrace("encode", span.Context()) + bytes, flags, err := transcoder.Encode(item.Value) + if err != nil { + etrace.Finish() + item.Err = err + signal <- item + return + } + etrace.Finish() + + op, err := provider.Replace(gocbcore.ReplaceOptions{ + Key: []byte(item.ID), + Value: bytes, + Flags: flags, + Cas: gocbcore.Cas(item.Cas), + Expiry: durationToExpiry(item.Expiry), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.StoreResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// AppendOp represents a type of `BulkOp` used for Append operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type AppendOp struct { + bulkOp + + ID string + Value string + Result *MutationResult + Err error +} + +func (item *AppendOp) markError(err error) { + item.Err = err +} + +func (item *AppendOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("AppendOp", tracectx) + item.bulkOp.span = span + + op, err := provider.Append(gocbcore.AdjoinOptions{ + Key: []byte(item.ID), + Value: []byte(item.Value), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.AdjoinResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// PrependOp represents a type of `BulkOp` used for Prepend operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type PrependOp struct { + bulkOp + + ID string + Value string + Result *MutationResult + Err error +} + +func (item *PrependOp) markError(err error) { + item.Err = err +} + +func (item *PrependOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("PrependOp", tracectx) + item.bulkOp.span = span + + op, err := provider.Prepend(gocbcore.AdjoinOptions{ + Key: []byte(item.ID), + Value: []byte(item.Value), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.AdjoinResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// IncrementOp represents a type of `BulkOp` used for Increment operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type IncrementOp struct { + bulkOp + + ID string + Delta int64 + Initial int64 + Expiry time.Duration + + Result *CounterResult + Err error +} + +func (item *IncrementOp) markError(err error) { + item.Err = err +} + +func (item *IncrementOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("IncrementOp", tracectx) + item.bulkOp.span = span + + realInitial := uint64(0xFFFFFFFFFFFFFFFF) + if item.Initial > 0 { + realInitial = uint64(item.Initial) + } + + op, err := provider.Increment(gocbcore.CounterOptions{ + Key: []byte(item.ID), + Delta: uint64(item.Delta), + Initial: realInitial, + Expiry: durationToExpiry(item.Expiry), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.CounterResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &CounterResult{ + MutationResult: MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + }, + content: res.Value, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} + +// DecrementOp represents a type of `BulkOp` used for Decrement operations. See BulkOp. +// UNCOMMITTED: This API may change in the future. 
+type DecrementOp struct { + bulkOp + + ID string + Delta int64 + Initial int64 + Expiry time.Duration + + Result *CounterResult + Err error +} + +func (item *DecrementOp) markError(err error) { + item.Err = err +} + +func (item *DecrementOp) execute(tracectx requestSpanContext, c *Collection, provider kvProvider, transcoder Transcoder, signal chan BulkOp, + retryWrapper *retryStrategyWrapper, deadline time.Time, startSpanFunc func(string, requestSpanContext) requestSpan) { + span := startSpanFunc("DecrementOp", tracectx) + item.bulkOp.span = span + + realInitial := uint64(0xFFFFFFFFFFFFFFFF) + if item.Initial > 0 { + realInitial = uint64(item.Initial) + } + + op, err := provider.Decrement(gocbcore.CounterOptions{ + Key: []byte(item.ID), + Delta: uint64(item.Delta), + Initial: realInitial, + Expiry: durationToExpiry(item.Expiry), + CollectionName: c.name(), + ScopeName: c.ScopeName(), + RetryStrategy: retryWrapper, + TraceContext: span.Context(), + Deadline: deadline, + }, func(res *gocbcore.CounterResult, err error) { + item.Err = maybeEnhanceCollKVErr(err, provider, c, item.ID) + if item.Err == nil { + item.Result = &CounterResult{ + MutationResult: MutationResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + }, + content: res.Value, + } + + if res.MutationToken.VbUUID != 0 { + mutTok := &MutationToken{ + token: res.MutationToken, + bucketName: c.bucketName(), + } + item.Result.mt = mutTok + } + } + signal <- item + }) + if err != nil { + item.Err = err + signal <- item + } else { + item.bulkOp.pendop = op + } +} diff --git a/vendor/github.com/couchbase/gocb/v2/collection_crud.go b/vendor/github.com/couchbase/gocb/v2/collection_crud.go new file mode 100644 index 000000000000..89bca118aeae --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/collection_crud.go @@ -0,0 +1,1040 @@ +package gocb + +import ( + "errors" + "sync" + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +type kvProvider interface { + Add(opts gocbcore.AddOptions, cb 
gocbcore.StoreCallback) (gocbcore.PendingOp, error) + Set(opts gocbcore.SetOptions, cb gocbcore.StoreCallback) (gocbcore.PendingOp, error) + Replace(opts gocbcore.ReplaceOptions, cb gocbcore.StoreCallback) (gocbcore.PendingOp, error) + Get(opts gocbcore.GetOptions, cb gocbcore.GetCallback) (gocbcore.PendingOp, error) + GetOneReplica(opts gocbcore.GetOneReplicaOptions, cb gocbcore.GetReplicaCallback) (gocbcore.PendingOp, error) + Observe(opts gocbcore.ObserveOptions, cb gocbcore.ObserveCallback) (gocbcore.PendingOp, error) + ObserveVb(opts gocbcore.ObserveVbOptions, cb gocbcore.ObserveVbCallback) (gocbcore.PendingOp, error) + GetMeta(opts gocbcore.GetMetaOptions, cb gocbcore.GetMetaCallback) (gocbcore.PendingOp, error) + Delete(opts gocbcore.DeleteOptions, cb gocbcore.DeleteCallback) (gocbcore.PendingOp, error) + LookupIn(opts gocbcore.LookupInOptions, cb gocbcore.LookupInCallback) (gocbcore.PendingOp, error) + MutateIn(opts gocbcore.MutateInOptions, cb gocbcore.MutateInCallback) (gocbcore.PendingOp, error) + GetAndTouch(opts gocbcore.GetAndTouchOptions, cb gocbcore.GetAndTouchCallback) (gocbcore.PendingOp, error) + GetAndLock(opts gocbcore.GetAndLockOptions, cb gocbcore.GetAndLockCallback) (gocbcore.PendingOp, error) + Unlock(opts gocbcore.UnlockOptions, cb gocbcore.UnlockCallback) (gocbcore.PendingOp, error) + Touch(opts gocbcore.TouchOptions, cb gocbcore.TouchCallback) (gocbcore.PendingOp, error) + Increment(opts gocbcore.CounterOptions, cb gocbcore.CounterCallback) (gocbcore.PendingOp, error) + Decrement(opts gocbcore.CounterOptions, cb gocbcore.CounterCallback) (gocbcore.PendingOp, error) + Append(opts gocbcore.AdjoinOptions, cb gocbcore.AdjoinCallback) (gocbcore.PendingOp, error) + Prepend(opts gocbcore.AdjoinOptions, cb gocbcore.AdjoinCallback) (gocbcore.PendingOp, error) + ConfigSnapshot() (*gocbcore.ConfigSnapshot, error) +} + +// Cas represents the specific state of a document on the cluster. 
+type Cas gocbcore.Cas + +// InsertOptions are options that can be applied to an Insert operation. +type InsertOptions struct { + Expiry time.Duration + PersistTo uint + ReplicateTo uint + DurabilityLevel DurabilityLevel + Transcoder Transcoder + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// Insert creates a new document in the Collection. +func (c *Collection) Insert(id string, val interface{}, opts *InsertOptions) (mutOut *MutationResult, errOut error) { + if opts == nil { + opts = &InsertOptions{} + } + + opm := c.newKvOpManager("Insert", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetTranscoder(opts.Transcoder) + opm.SetValue(val) + opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Add(gocbcore.AddOptions{ + Key: opm.DocumentID(), + Value: opm.ValueBytes(), + Flags: opm.ValueFlags(), + Expiry: durationToExpiry(opts.Expiry), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.StoreResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mutOut = &MutationResult{} + mutOut.cas = Cas(res.Cas) + mutOut.mt = opm.EnhanceMt(res.MutationToken) + + opm.Resolve(mutOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// UpsertOptions are options that can be applied to an Upsert operation. 
+type UpsertOptions struct { + Expiry time.Duration + PersistTo uint + ReplicateTo uint + DurabilityLevel DurabilityLevel + Transcoder Transcoder + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// Upsert creates a new document in the Collection if it does not exist, if it does exist then it updates it. +func (c *Collection) Upsert(id string, val interface{}, opts *UpsertOptions) (mutOut *MutationResult, errOut error) { + if opts == nil { + opts = &UpsertOptions{} + } + + opm := c.newKvOpManager("Upsert", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetTranscoder(opts.Transcoder) + opm.SetValue(val) + opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Set(gocbcore.SetOptions{ + Key: opm.DocumentID(), + Value: opm.ValueBytes(), + Flags: opm.ValueFlags(), + Expiry: durationToExpiry(opts.Expiry), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.StoreResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mutOut = &MutationResult{} + mutOut.cas = Cas(res.Cas) + mutOut.mt = opm.EnhanceMt(res.MutationToken) + + opm.Resolve(mutOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// ReplaceOptions are the options available to a Replace operation. 
+type ReplaceOptions struct { + Expiry time.Duration + Cas Cas + PersistTo uint + ReplicateTo uint + DurabilityLevel DurabilityLevel + Transcoder Transcoder + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// Replace updates a document in the collection. +func (c *Collection) Replace(id string, val interface{}, opts *ReplaceOptions) (mutOut *MutationResult, errOut error) { + if opts == nil { + opts = &ReplaceOptions{} + } + + opm := c.newKvOpManager("Replace", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetTranscoder(opts.Transcoder) + opm.SetValue(val) + opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Replace(gocbcore.ReplaceOptions{ + Key: opm.DocumentID(), + Value: opm.ValueBytes(), + Flags: opm.ValueFlags(), + Expiry: durationToExpiry(opts.Expiry), + Cas: gocbcore.Cas(opts.Cas), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.StoreResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mutOut = &MutationResult{} + mutOut.cas = Cas(res.Cas) + mutOut.mt = opm.EnhanceMt(res.MutationToken) + + opm.Resolve(mutOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// GetOptions are the options available to a Get operation. +type GetOptions struct { + WithExpiry bool + // Project causes the Get operation to only fetch the fields indicated + // by the paths. The result of the operation is then treated as a + // standard GetResult. 
+ Project []string + Transcoder Transcoder + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// Get performs a fetch operation against the collection. This can take 3 paths, a standard full document +// fetch, a subdocument full document fetch also fetching document expiry (when WithExpiry is set), +// or a subdocument fetch (when Project is used). +func (c *Collection) Get(id string, opts *GetOptions) (docOut *GetResult, errOut error) { + if opts == nil { + opts = &GetOptions{} + } + + if len(opts.Project) == 0 && !opts.WithExpiry { + return c.getDirect(id, opts) + } + + return c.getProjected(id, opts) +} + +func (c *Collection) getDirect(id string, opts *GetOptions) (docOut *GetResult, errOut error) { + if opts == nil { + opts = &GetOptions{} + } + + opm := c.newKvOpManager("Get", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetTranscoder(opts.Transcoder) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Get(gocbcore.GetOptions{ + Key: opm.DocumentID(), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.GetResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + doc := &GetResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + transcoder: opm.Transcoder(), + contents: res.Value, + flags: res.Flags, + } + + docOut = doc + + opm.Resolve(nil) + })) + if err != nil { + errOut = err + } + return +} + +func (c *Collection) getProjected(id string, opts *GetOptions) (docOut *GetResult, errOut error) { + if opts == nil { + opts = &GetOptions{} + } + + opm := c.newKvOpManager("Get", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetTranscoder(opts.Transcoder) + 
opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if opts.Transcoder != nil { + return nil, errors.New("Cannot specify custom transcoder for projected gets") + } + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + numProjects := len(opts.Project) + if opts.WithExpiry { + numProjects = 1 + numProjects + } + + projections := opts.Project + if numProjects > 16 { + projections = nil + } + + var ops []LookupInSpec + + if opts.WithExpiry { + ops = append(ops, GetSpec("$document.exptime", &GetSpecOptions{IsXattr: true})) + } + + if len(projections) == 0 { + ops = append(ops, GetSpec("", nil)) + } else { + for _, path := range projections { + ops = append(ops, GetSpec(path, nil)) + } + } + + result, err := c.internalLookupIn(opm, ops, false) + if err != nil { + return nil, err + } + + doc := &GetResult{} + if opts.WithExpiry { + // if expiration was requested then extract and remove it from the results + err = result.ContentAt(0, &doc.expiry) + if err != nil { + return nil, err + } + ops = ops[1:] + result.contents = result.contents[1:] + } + + doc.transcoder = opm.Transcoder() + doc.cas = result.cas + if projections == nil { + err = doc.fromFullProjection(ops, result, opts.Project) + if err != nil { + return nil, err + } + } else { + err = doc.fromSubDoc(ops, result) + if err != nil { + return nil, err + } + } + + return doc, nil +} + +// ExistsOptions are the options available to the Exists command. +type ExistsOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// Exists checks if a document exists for the given id. 
+func (c *Collection) Exists(id string, opts *ExistsOptions) (docOut *ExistsResult, errOut error) { + if opts == nil { + opts = &ExistsOptions{} + } + + opm := c.newKvOpManager("Exists", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.GetMeta(gocbcore.GetMetaOptions{ + Key: opm.DocumentID(), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan, + Deadline: opm.Deadline(), + }, func(res *gocbcore.GetMetaResult, err error) { + if errors.Is(err, ErrDocumentNotFound) { + docOut = &ExistsResult{ + Result: Result{ + cas: Cas(0), + }, + docExists: false, + } + opm.Resolve(nil) + return + } + + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + if res != nil { + docOut = &ExistsResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + docExists: res.Deleted == 0, + } + } + + opm.Resolve(nil) + })) + if err != nil { + errOut = err + } + return +} + +func (c *Collection) getOneReplica( + span requestSpanContext, + id string, + replicaIdx int, + transcoder Transcoder, + retryStrategy RetryStrategy, + cancelCh chan struct{}, + timeout time.Duration, +) (docOut *GetReplicaResult, errOut error) { + opm := c.newKvOpManager("getOneReplica", span) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetTranscoder(transcoder) + opm.SetRetryStrategy(retryStrategy) + opm.SetTimeout(timeout) + opm.SetCancelCh(cancelCh) + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + if replicaIdx == 0 { + err = opm.Wait(agent.Get(gocbcore.GetOptions{ + Key: opm.DocumentID(), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + 
Deadline: opm.Deadline(), + }, func(res *gocbcore.GetResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + docOut = &GetReplicaResult{} + docOut.cas = Cas(res.Cas) + docOut.transcoder = opm.Transcoder() + docOut.contents = res.Value + docOut.flags = res.Flags + docOut.isReplica = false + + opm.Resolve(nil) + })) + if err != nil { + errOut = err + } + return + } + + err = opm.Wait(agent.GetOneReplica(gocbcore.GetOneReplicaOptions{ + Key: opm.DocumentID(), + ReplicaIdx: replicaIdx, + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.GetReplicaResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + docOut = &GetReplicaResult{} + docOut.cas = Cas(res.Cas) + docOut.transcoder = opm.Transcoder() + docOut.contents = res.Value + docOut.flags = res.Flags + docOut.isReplica = true + + opm.Resolve(nil) + })) + if err != nil { + errOut = err + } + return +} + +// GetAllReplicaOptions are the options available to the GetAllReplicas command. +type GetAllReplicaOptions struct { + Transcoder Transcoder + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAllReplicasResult represents the results of a GetAllReplicas operation. +type GetAllReplicasResult struct { + lock sync.Mutex + totalRequests uint32 + totalResults uint32 + resCh chan *GetReplicaResult + cancelCh chan struct{} +} + +func (r *GetAllReplicasResult) addResult(res *GetReplicaResult) { + // We use a lock here because the alternative means that there is a race + // between the channel writes from multiple results and the channels being + // closed. 
IE: T1-Incr, T2-Incr, T2-Send, T2-Close, T1-Send[PANIC] + r.lock.Lock() + + r.totalResults++ + resultCount := r.totalResults + + if resultCount <= r.totalRequests { + r.resCh <- res + } + + if resultCount == r.totalRequests { + close(r.cancelCh) + close(r.resCh) + } + + r.lock.Unlock() +} + +// Next fetches the next replica result. +func (r *GetAllReplicasResult) Next() *GetReplicaResult { + return <-r.resCh +} + +// Close cancels all remaining get replica requests. +func (r *GetAllReplicasResult) Close() error { + // See addResult discussion on lock usage. + r.lock.Lock() + + // Note that this number increment must be high enough to be clear that + // the result set was closed, but low enough that it won't overflow if + // additional result objects are processed after the close. + prevResultCount := r.totalResults + r.totalResults += 100000 + + // We only have to close everything if the addResult method didn't already + // close them due to already having completed every request + if prevResultCount < r.totalRequests { + close(r.cancelCh) + close(r.resCh) + } + + r.lock.Unlock() + + return nil +} + +// GetAllReplicas returns the value of a particular document from all replica servers. This will return an iterable +// which streams results one at a time. +func (c *Collection) GetAllReplicas(id string, opts *GetAllReplicaOptions) (docOut *GetAllReplicasResult, errOut error) { + if opts == nil { + opts = &GetAllReplicaOptions{} + } + + span := c.startKvOpTrace("GetAllReplicas", nil) + defer span.Finish() + + // Timeout needs to be adjusted here, since we use it at the bottom of this + // function, but the remaining options are all passed downwards and get handled + // by those functions rather than us. 
+ timeout := opts.Timeout + if timeout == 0 { + timeout = c.timeoutsConfig.KVTimeout + } + + deadline := time.Now().Add(timeout) + transcoder := opts.Transcoder + retryStrategy := opts.RetryStrategy + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + + snapshot, err := agent.ConfigSnapshot() + if err != nil { + return nil, err + } + + numReplicas, err := snapshot.NumReplicas() + if err != nil { + return nil, err + } + + numServers := numReplicas + 1 + outCh := make(chan *GetReplicaResult, numServers) + cancelCh := make(chan struct{}) + + repRes := &GetAllReplicasResult{ + totalRequests: uint32(numServers), + resCh: outCh, + cancelCh: cancelCh, + } + + // Loop all the servers and populate the result object + for replicaIdx := 0; replicaIdx < numServers; replicaIdx++ { + go func(replicaIdx int) { + // This timeout value will cause the getOneReplica operation to timeout after our deadline has expired, + // as the deadline has already begun. getOneReplica timing out before our deadline would cause inconsistent + // behaviour. + res, err := c.getOneReplica(span, id, replicaIdx, transcoder, retryStrategy, cancelCh, timeout) + if err != nil { + logDebugf("Failed to fetch replica from replica %d: %s", replicaIdx, err) + } else { + repRes.addResult(res) + } + }(replicaIdx) + } + + // Start a timer to close it after the deadline + go func() { + select { + case <-time.After(time.Until(deadline)): + // If we timeout, we should close the result + err := repRes.Close() + if err != nil { + logDebugf("failed to close GetAllReplicas response: %s", err) + } + return + case <-cancelCh: + // If the cancel channel closes, we are done + return + } + }() + + return repRes, nil +} + +// GetAnyReplicaOptions are the options available to the GetAnyReplica command. +type GetAnyReplicaOptions struct { + Transcoder Transcoder + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAnyReplica returns the value of a particular document from a replica server. 
+func (c *Collection) GetAnyReplica(id string, opts *GetAnyReplicaOptions) (docOut *GetReplicaResult, errOut error) { + if opts == nil { + opts = &GetAnyReplicaOptions{} + } + + span := c.startKvOpTrace("GetAnyReplica", nil) + defer span.Finish() + + repRes, err := c.GetAllReplicas(id, &GetAllReplicaOptions{ + Timeout: opts.Timeout, + Transcoder: opts.Transcoder, + RetryStrategy: opts.RetryStrategy, + }) + if err != nil { + return nil, err + } + + // Try to fetch at least one result + res := repRes.Next() + if res == nil { + return nil, &KeyValueError{ + InnerError: ErrDocumentUnretrievable, + BucketName: c.bucketName(), + ScopeName: c.scope, + CollectionName: c.collectionName, + } + } + + // Close the results channel since we don't care about any of the + // remaining result objects at this point. + err = repRes.Close() + if err != nil { + logDebugf("failed to close GetAnyReplica response: %s", err) + } + + return res, nil +} + +// RemoveOptions are the options available to the Remove command. +type RemoveOptions struct { + Cas Cas + PersistTo uint + ReplicateTo uint + DurabilityLevel DurabilityLevel + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// Remove removes a document from the collection. 
+func (c *Collection) Remove(id string, opts *RemoveOptions) (mutOut *MutationResult, errOut error) { + if opts == nil { + opts = &RemoveOptions{} + } + + opm := c.newKvOpManager("Remove", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetDuraOptions(opts.PersistTo, opts.ReplicateTo, opts.DurabilityLevel) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Delete(gocbcore.DeleteOptions{ + Key: opm.DocumentID(), + Cas: gocbcore.Cas(opts.Cas), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.DeleteResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mutOut = &MutationResult{} + mutOut.cas = Cas(res.Cas) + mutOut.mt = opm.EnhanceMt(res.MutationToken) + + opm.Resolve(mutOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// GetAndTouchOptions are the options available to the GetAndTouch operation. +type GetAndTouchOptions struct { + Transcoder Transcoder + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAndTouch retrieves a document and simultaneously updates its expiry time. 
+func (c *Collection) GetAndTouch(id string, expiry time.Duration, opts *GetAndTouchOptions) (docOut *GetResult, errOut error) { + if opts == nil { + opts = &GetAndTouchOptions{} + } + + opm := c.newKvOpManager("GetAndTouch", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetTranscoder(opts.Transcoder) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.GetAndTouch(gocbcore.GetAndTouchOptions{ + Key: opm.DocumentID(), + Expiry: durationToExpiry(expiry), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.GetAndTouchResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + if res != nil { + doc := &GetResult{ + Result: Result{ + cas: Cas(res.Cas), + }, + transcoder: opm.Transcoder(), + contents: res.Value, + flags: res.Flags, + } + + docOut = doc + } + + opm.Resolve(nil) + })) + if err != nil { + errOut = err + } + return +} + +// GetAndLockOptions are the options available to the GetAndLock operation. +type GetAndLockOptions struct { + Transcoder Transcoder + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// GetAndLock locks a document for a period of time, providing exclusive RW access to it. +// A lockTime value of over 30 seconds will be treated as 30 seconds. The resolution used to send this value to +// the server is seconds and is calculated using uint32(lockTime/time.Second). 
+func (c *Collection) GetAndLock(id string, lockTime time.Duration, opts *GetAndLockOptions) (docOut *GetResult, errOut error) {
+	if opts == nil {
+		opts = &GetAndLockOptions{}
+	}
+
+	opm := c.newKvOpManager("GetAndLock", nil)
+	defer opm.Finish()
+
+	opm.SetDocumentID(id)
+	opm.SetTranscoder(opts.Transcoder)
+	opm.SetRetryStrategy(opts.RetryStrategy)
+	opm.SetTimeout(opts.Timeout)
+
+	if err := opm.CheckReadyForOp(); err != nil {
+		return nil, err
+	}
+
+	agent, err := c.getKvProvider()
+	if err != nil {
+		return nil, err
+	}
+	err = opm.Wait(agent.GetAndLock(gocbcore.GetAndLockOptions{
+		Key:            opm.DocumentID(),
+		LockTime:       uint32(lockTime / time.Second),
+		CollectionName: opm.CollectionName(),
+		ScopeName:      opm.ScopeName(),
+		RetryStrategy:  opm.RetryStrategy(),
+		TraceContext:   opm.TraceSpan(),
+		Deadline:       opm.Deadline(),
+	}, func(res *gocbcore.GetAndLockResult, err error) {
+		if err != nil {
+			errOut = opm.EnhanceErr(err)
+			opm.Reject()
+			return
+		}
+
+		if res != nil {
+			doc := &GetResult{
+				Result: Result{
+					cas: Cas(res.Cas),
+				},
+				transcoder: opm.Transcoder(),
+				contents:   res.Value,
+				flags:      res.Flags,
+			}
+
+			docOut = doc
+		}
+
+		opm.Resolve(nil)
+	}))
+	if err != nil {
+		errOut = err
+	}
+	return
+}
+
+// UnlockOptions are the options available to the Unlock operation.
+type UnlockOptions struct {
+	Timeout       time.Duration
+	RetryStrategy RetryStrategy
+}
+
+// Unlock unlocks a document which was locked with GetAndLock.
+func (c *Collection) Unlock(id string, cas Cas, opts *UnlockOptions) (errOut error) { + if opts == nil { + opts = &UnlockOptions{} + } + + opm := c.newKvOpManager("Unlock", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return err + } + + agent, err := c.getKvProvider() + if err != nil { + return err + } + err = opm.Wait(agent.Unlock(gocbcore.UnlockOptions{ + Key: opm.DocumentID(), + Cas: gocbcore.Cas(cas), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.UnlockResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mt := opm.EnhanceMt(res.MutationToken) + opm.Resolve(mt) + })) + if err != nil { + errOut = err + } + return +} + +// TouchOptions are the options available to the Touch operation. +type TouchOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy +} + +// Touch touches a document, specifying a new expiry time for it. 
+func (c *Collection) Touch(id string, expiry time.Duration, opts *TouchOptions) (mutOut *MutationResult, errOut error) { + if opts == nil { + opts = &TouchOptions{} + } + + opm := c.newKvOpManager("Touch", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.Touch(gocbcore.TouchOptions{ + Key: opm.DocumentID(), + Expiry: durationToExpiry(expiry), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.TouchResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mutOut = &MutationResult{} + mutOut.cas = Cas(res.Cas) + mutOut.mt = opm.EnhanceMt(res.MutationToken) + + opm.Resolve(mutOut.mt) + })) + if err != nil { + errOut = err + } + return +} + +// Binary creates and returns a BinaryCollection object. +func (c *Collection) Binary() *BinaryCollection { + return &BinaryCollection{collection: c} +} diff --git a/vendor/github.com/couchbase/gocb/v2/collection_ds.go b/vendor/github.com/couchbase/gocb/v2/collection_ds.go new file mode 100644 index 000000000000..c22796f7498d --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/collection_ds.go @@ -0,0 +1,476 @@ +package gocb + +import ( + "errors" + "fmt" +) + +// CouchbaseList represents a list document. +type CouchbaseList struct { + collection *Collection + id string +} + +// List returns a new CouchbaseList for the document specified by id. +func (c *Collection) List(id string) *CouchbaseList { + return &CouchbaseList{ + collection: c, + id: id, + } +} + +// Iterator returns an iterable for all items in the list. 
+func (cl *CouchbaseList) Iterator() ([]interface{}, error) { + content, err := cl.collection.Get(cl.id, nil) + if err != nil { + return nil, err + } + + var listContents []interface{} + err = content.Content(&listContents) + if err != nil { + return nil, err + } + + return listContents, nil +} + +// At retrieves the value specified at the given index from the list. +func (cl *CouchbaseList) At(index int, valuePtr interface{}) error { + ops := make([]LookupInSpec, 1) + ops[0] = GetSpec(fmt.Sprintf("[%d]", index), nil) + result, err := cl.collection.LookupIn(cl.id, ops, nil) + if err != nil { + return err + } + + return result.ContentAt(0, valuePtr) +} + +// RemoveAt removes the value specified at the given index from the list. +func (cl *CouchbaseList) RemoveAt(index int) error { + ops := make([]MutateInSpec, 1) + ops[0] = RemoveSpec(fmt.Sprintf("[%d]", index), nil) + _, err := cl.collection.MutateIn(cl.id, ops, nil) + if err != nil { + return err + } + + return nil +} + +// Append appends an item to the list. +func (cl *CouchbaseList) Append(val interface{}) error { + ops := make([]MutateInSpec, 1) + ops[0] = ArrayAppendSpec("", val, nil) + _, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert}) + if err != nil { + return err + } + + return nil +} + +// Prepend prepends an item to the list. +func (cl *CouchbaseList) Prepend(val interface{}) error { + ops := make([]MutateInSpec, 1) + ops[0] = ArrayPrependSpec("", val, nil) + _, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert}) + if err != nil { + return err + } + + return nil +} + +// IndexOf gets the index of the item in the list. 
+func (cl *CouchbaseList) IndexOf(val interface{}) (int, error) { + content, err := cl.collection.Get(cl.id, nil) + if err != nil { + return 0, err + } + + var listContents []interface{} + err = content.Content(&listContents) + if err != nil { + return 0, err + } + + for i, item := range listContents { + if item == val { + return i, nil + } + } + + return -1, nil +} + +// Size returns the size of the list. +func (cl *CouchbaseList) Size() (int, error) { + ops := make([]LookupInSpec, 1) + ops[0] = CountSpec("", nil) + result, err := cl.collection.LookupIn(cl.id, ops, nil) + if err != nil { + return 0, err + } + + var count int + err = result.ContentAt(0, &count) + if err != nil { + return 0, err + } + + return count, nil +} + +// Clear clears a list, also removing it. +func (cl *CouchbaseList) Clear() error { + _, err := cl.collection.Remove(cl.id, nil) + if err != nil { + return err + } + + return nil +} + +// CouchbaseMap represents a map document. +type CouchbaseMap struct { + collection *Collection + id string +} + +// Map returns a new CouchbaseMap. +func (c *Collection) Map(id string) *CouchbaseMap { + return &CouchbaseMap{ + collection: c, + id: id, + } +} + +// Iterator returns an iterable for all items in the map. +func (cl *CouchbaseMap) Iterator() (map[string]interface{}, error) { + content, err := cl.collection.Get(cl.id, nil) + if err != nil { + return nil, err + } + + var mapContents map[string]interface{} + err = content.Content(&mapContents) + if err != nil { + return nil, err + } + + return mapContents, nil +} + +// At retrieves the item for the given id from the map. +func (cl *CouchbaseMap) At(id string, valuePtr interface{}) error { + ops := make([]LookupInSpec, 1) + ops[0] = GetSpec(fmt.Sprintf("[%s]", id), nil) + result, err := cl.collection.LookupIn(cl.id, ops, nil) + if err != nil { + return err + } + + return result.ContentAt(0, valuePtr) +} + +// Add adds an item to the map. 
+func (cl *CouchbaseMap) Add(id string, val interface{}) error {
+	ops := make([]MutateInSpec, 1)
+	ops[0] = UpsertSpec(id, val, nil)
+	_, err := cl.collection.MutateIn(cl.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Remove removes an item from the map.
+func (cl *CouchbaseMap) Remove(id string) error {
+	ops := make([]MutateInSpec, 1)
+	ops[0] = RemoveSpec(id, nil)
+	_, err := cl.collection.MutateIn(cl.id, ops, nil)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Exists verifies whether or not an id exists in the map.
+func (cl *CouchbaseMap) Exists(id string) (bool, error) {
+	ops := make([]LookupInSpec, 1)
+	ops[0] = ExistsSpec(fmt.Sprintf("[%s]", id), nil)
+	result, err := cl.collection.LookupIn(cl.id, ops, nil)
+	if err != nil {
+		return false, err
+	}
+
+	return result.Exists(0), nil
+}
+
+// Size returns the size of the map.
+func (cl *CouchbaseMap) Size() (int, error) {
+	ops := make([]LookupInSpec, 1)
+	ops[0] = CountSpec("", nil)
+	result, err := cl.collection.LookupIn(cl.id, ops, nil)
+	if err != nil {
+		return 0, err
+	}
+
+	var count int
+	err = result.ContentAt(0, &count)
+	if err != nil {
+		return 0, err
+	}
+
+	return count, nil
+}
+
+// Keys returns all of the keys within the map.
+func (cl *CouchbaseMap) Keys() ([]string, error) {
+	content, err := cl.collection.Get(cl.id, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var mapContents map[string]interface{}
+	err = content.Content(&mapContents)
+	if err != nil {
+		return nil, err
+	}
+
+	var keys []string
+	for id := range mapContents {
+		keys = append(keys, id)
+	}
+
+	return keys, nil
+}
+
+// Values returns all of the values within the map.
+func (cl *CouchbaseMap) Values() ([]interface{}, error) {
+	content, err := cl.collection.Get(cl.id, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var mapContents map[string]interface{}
+	err = content.Content(&mapContents)
+	if err != nil {
+		return nil, err
+	}
+
+	var values []interface{}
+	for _, val := range mapContents {
+		values = append(values, val)
+	}
+
+	return values, nil
+}
+
+// Clear clears a map, also removing it.
+func (cl *CouchbaseMap) Clear() error {
+	_, err := cl.collection.Remove(cl.id, nil)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// CouchbaseSet represents a set document.
+type CouchbaseSet struct {
+	id         string
+	underlying *CouchbaseList
+}
+
+// Set returns a new CouchbaseSet.
+func (c *Collection) Set(id string) *CouchbaseSet {
+	return &CouchbaseSet{
+		id:         id,
+		underlying: c.List(id),
+	}
+}
+
+// Iterator returns an iterable for all items in the set.
+func (cs *CouchbaseSet) Iterator() ([]interface{}, error) {
+	return cs.underlying.Iterator()
+}
+
+// Add adds a value to the set.
+func (cs *CouchbaseSet) Add(val interface{}) error {
+	ops := make([]MutateInSpec, 1)
+	ops[0] = ArrayAddUniqueSpec("", val, nil)
+	_, err := cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{StoreSemantic: StoreSemanticsUpsert})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Remove removes a value from the set.
+func (cs *CouchbaseSet) Remove(val string) error { + for i := 0; i < 16; i++ { + content, err := cs.underlying.collection.Get(cs.id, nil) + if err != nil { + return err + } + + cas := content.Cas() + + var setContents []interface{} + err = content.Content(&setContents) + if err != nil { + return err + } + + indexToRemove := -1 + for i, item := range setContents { + if item == val { + indexToRemove = i + } + } + + if indexToRemove > -1 { + ops := make([]MutateInSpec, 1) + ops[0] = RemoveSpec(fmt.Sprintf("[%d]", indexToRemove), nil) + _, err = cs.underlying.collection.MutateIn(cs.id, ops, &MutateInOptions{Cas: cas}) + if errors.Is(err, ErrCasMismatch) { + continue + } + if err != nil { + return err + } + } + return nil + } + + return errors.New("failed to perform operation after 16 retries") +} + +// Values returns all of the values within the set. +func (cs *CouchbaseSet) Values() ([]interface{}, error) { + content, err := cs.underlying.collection.Get(cs.id, nil) + if err != nil { + return nil, err + } + + var setContents []interface{} + err = content.Content(&setContents) + if err != nil { + return nil, err + } + + return setContents, nil +} + +// Contains verifies whether or not a value exists within the set. +func (cs *CouchbaseSet) Contains(val string) (bool, error) { + content, err := cs.underlying.collection.Get(cs.id, nil) + if err != nil { + return false, err + } + + var setContents []interface{} + err = content.Content(&setContents) + if err != nil { + return false, err + } + + for _, item := range setContents { + if item == val { + return true, nil + } + } + + return false, nil +} + +// Size returns the size of the set +func (cs *CouchbaseSet) Size() (int, error) { + return cs.underlying.Size() +} + +// Clear clears a set, also removing it. +func (cs *CouchbaseSet) Clear() error { + err := cs.underlying.Clear() + if err != nil { + return err + } + + return nil +} + +// CouchbaseQueue represents a queue document. 
+type CouchbaseQueue struct {
+	id         string
+	underlying *CouchbaseList
+}
+
+// Queue returns a new CouchbaseQueue.
+func (c *Collection) Queue(id string) *CouchbaseQueue {
+	return &CouchbaseQueue{
+		id:         id,
+		underlying: c.List(id),
+	}
+}
+
+// Iterator returns an iterable for all items in the queue.
+func (cs *CouchbaseQueue) Iterator() ([]interface{}, error) {
+	return cs.underlying.Iterator()
+}
+
+// Push pushes a value onto the queue.
+func (cs *CouchbaseQueue) Push(val interface{}) error {
+	return cs.underlying.Prepend(val)
+}
+
+// Pop pops an item off of the queue.
+func (cs *CouchbaseQueue) Pop(valuePtr interface{}) error {
+	for i := 0; i < 16; i++ {
+		ops := make([]LookupInSpec, 1)
+		ops[0] = GetSpec("[-1]", nil)
+		content, err := cs.underlying.collection.LookupIn(cs.id, ops, nil)
+		if err != nil {
+			return err
+		}
+
+		cas := content.Cas()
+		err = content.ContentAt(0, valuePtr)
+		if err != nil {
+			return err
+		}
+
+		mutateOps := make([]MutateInSpec, 1)
+		mutateOps[0] = RemoveSpec("[-1]", nil)
+		_, err = cs.underlying.collection.MutateIn(cs.id, mutateOps, &MutateInOptions{Cas: cas})
+		if errors.Is(err, ErrCasMismatch) {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	return errors.New("failed to perform operation after 16 retries")
+}
+
+// Size returns the size of the queue.
+func (cs *CouchbaseQueue) Size() (int, error) {
+	return cs.underlying.Size()
+}
+
+// Clear clears a queue, also removing it.
+func (cs *CouchbaseQueue) Clear() error { + err := cs.underlying.Clear() + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/collection_dura.go b/vendor/github.com/couchbase/gocb/v2/collection_dura.go new file mode 100644 index 000000000000..8f6004d20b67 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/collection_dura.go @@ -0,0 +1,174 @@ +package gocb + +import ( + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +func (c *Collection) observeOnceSeqNo( + tracectx requestSpanContext, + docID string, + mt gocbcore.MutationToken, + replicaIdx int, + cancelCh chan struct{}, + timeout time.Duration, +) (didReplicate, didPersist bool, errOut error) { + opm := c.newKvOpManager("observeOnceSeqNo", tracectx) + defer opm.Finish() + + opm.SetDocumentID(docID) + opm.SetCancelCh(cancelCh) + opm.SetTimeout(timeout) + + agent, err := c.getKvProvider() + if err != nil { + return false, false, err + } + err = opm.Wait(agent.ObserveVb(gocbcore.ObserveVbOptions{ + VbID: mt.VbID, + VbUUID: mt.VbUUID, + ReplicaIdx: replicaIdx, + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.ObserveVbResult, err error) { + if err != nil || res == nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + didReplicate = res.CurrentSeqNo >= mt.SeqNo + didPersist = res.PersistSeqNo >= mt.SeqNo + + opm.Resolve(nil) + })) + if err != nil { + errOut = err + } + return +} + +func (c *Collection) observeOne( + tracectx requestSpanContext, + docID string, + mt gocbcore.MutationToken, + replicaIdx int, + replicaCh, persistCh chan struct{}, + cancelCh chan struct{}, + timeout time.Duration, +) { + sentReplicated := false + sentPersisted := false + + calc := gocbcore.ExponentialBackoff(10*time.Microsecond, 100*time.Millisecond, 0) + retries := uint32(0) + +ObserveLoop: + for { + select { + case <-cancelCh: + break ObserveLoop + default: + // not cancelled yet + } + + didReplicate, didPersist, err := 
c.observeOnceSeqNo(tracectx, docID, mt, replicaIdx, cancelCh, timeout) + if err != nil { + logDebugf("ObserveOnce failed unexpected: %s", err) + return + } + + if didReplicate && !sentReplicated { + replicaCh <- struct{}{} + sentReplicated = true + } + + if didPersist && !sentPersisted { + persistCh <- struct{}{} + sentPersisted = true + } + + // If we've got persisted and replicated, we can just stop + if sentPersisted && sentReplicated { + break ObserveLoop + } + + waitTmr := gocbcore.AcquireTimer(calc(retries)) + retries++ + select { + case <-waitTmr.C: + gocbcore.ReleaseTimer(waitTmr, true) + case <-cancelCh: + gocbcore.ReleaseTimer(waitTmr, false) + } + } +} + +func (c *Collection) waitForDurability( + tracectx requestSpanContext, + docID string, + mt gocbcore.MutationToken, + replicateTo uint, + persistTo uint, + deadline time.Time, + cancelCh chan struct{}, +) error { + opm := c.newKvOpManager("waitForDurability", tracectx) + defer opm.Finish() + + opm.SetDocumentID(docID) + + agent, err := c.getKvProvider() + if err != nil { + return err + } + + snapshot, err := agent.ConfigSnapshot() + if err != nil { + return err + } + + numReplicas, err := snapshot.NumReplicas() + if err != nil { + return err + } + + numServers := numReplicas + 1 + if replicateTo > uint(numServers-1) || persistTo > uint(numServers) { + return opm.EnhanceErr(ErrDurabilityImpossible) + } + + subOpCancelCh := make(chan struct{}, 1) + replicaCh := make(chan struct{}, numServers) + persistCh := make(chan struct{}, numServers) + + for replicaIdx := 0; replicaIdx < numServers; replicaIdx++ { + go c.observeOne(opm.TraceSpan(), docID, mt, replicaIdx, replicaCh, persistCh, subOpCancelCh, time.Until(deadline)) + } + + numReplicated := uint(0) + numPersisted := uint(0) + + for { + select { + case <-replicaCh: + numReplicated++ + case <-persistCh: + numPersisted++ + case <-time.After(time.Until(deadline)): + // deadline exceeded + close(subOpCancelCh) + return opm.EnhanceErr(ErrAmbiguousTimeout) + 
case <-cancelCh: + // parent asked for cancellation + close(subOpCancelCh) + return opm.EnhanceErr(ErrRequestCanceled) + } + + if numReplicated >= replicateTo && numPersisted >= persistTo { + close(subOpCancelCh) + return nil + } + } +} diff --git a/vendor/github.com/couchbase/gocb/v2/collection_subdoc.go b/vendor/github.com/couchbase/gocb/v2/collection_subdoc.go new file mode 100644 index 000000000000..8caf05c3f66c --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/collection_subdoc.go @@ -0,0 +1,316 @@ +package gocb + +import ( + "encoding/json" + "errors" + "time" + + "github.com/couchbase/gocbcore/v9/memd" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +// LookupInOptions are the set of options available to LookupIn. +type LookupInOptions struct { + Timeout time.Duration + RetryStrategy RetryStrategy + + // Internal: This should never be used and is not supported. + Internal struct { + AccessDeleted bool + } +} + +// LookupIn performs a set of subdocument lookup operations on the document identified by id. 
+func (c *Collection) LookupIn(id string, ops []LookupInSpec, opts *LookupInOptions) (docOut *LookupInResult, errOut error) { + if opts == nil { + opts = &LookupInOptions{} + } + + opm := c.newKvOpManager("LookupIn", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + return c.internalLookupIn(opm, ops, opts.Internal.AccessDeleted) +} + +func (c *Collection) internalLookupIn( + opm *kvOpManager, + ops []LookupInSpec, + accessDeleted bool, +) (docOut *LookupInResult, errOut error) { + var subdocs []gocbcore.SubDocOp + for _, op := range ops { + if op.op == memd.SubDocOpGet && op.path == "" { + if op.isXattr { + return nil, errors.New("invalid xattr fetch with no path") + } + + subdocs = append(subdocs, gocbcore.SubDocOp{ + Op: memd.SubDocOpGetDoc, + Flags: memd.SubdocFlag(SubdocFlagNone), + }) + continue + } else if op.op == memd.SubDocOpDictSet && op.path == "" { + if op.isXattr { + return nil, errors.New("invalid xattr set with no path") + } + + subdocs = append(subdocs, gocbcore.SubDocOp{ + Op: memd.SubDocOpSetDoc, + Flags: memd.SubdocFlag(SubdocFlagNone), + }) + continue + } + + flags := memd.SubdocFlagNone + if op.isXattr { + flags |= memd.SubdocFlagXattrPath + } + + subdocs = append(subdocs, gocbcore.SubDocOp{ + Op: op.op, + Path: op.path, + Flags: flags, + }) + } + + var flags memd.SubdocDocFlag + if accessDeleted { + flags = memd.SubdocDocFlagAccessDeleted + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + + err = opm.Wait(agent.LookupIn(gocbcore.LookupInOptions{ + Key: opm.DocumentID(), + Ops: subdocs, + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + Flags: flags, + }, func(res *gocbcore.LookupInResult, err error) { + if err != nil && res == nil { + errOut = 
opm.EnhanceErr(err) + } + + if res != nil { + docOut = &LookupInResult{} + docOut.cas = Cas(res.Cas) + docOut.contents = make([]lookupInPartial, len(subdocs)) + for i, opRes := range res.Ops { + docOut.contents[i].err = opm.EnhanceErr(opRes.Err) + docOut.contents[i].data = json.RawMessage(opRes.Value) + } + } + + if err == nil { + opm.Resolve(nil) + } else { + opm.Reject() + } + })) + if err != nil { + errOut = err + } + return +} + +// StoreSemantics is used to define the document level action to take during a MutateIn operation. +type StoreSemantics uint8 + +const ( + // StoreSemanticsReplace signifies to Replace the document, and fail if it does not exist. + // This is the default action + StoreSemanticsReplace StoreSemantics = iota + + // StoreSemanticsUpsert signifies to replace the document or create it if it doesn't exist. + StoreSemanticsUpsert + + // StoreSemanticsInsert signifies to create the document, and fail if it exists. + StoreSemanticsInsert +) + +// MutateInOptions are the set of options available to MutateIn. +type MutateInOptions struct { + Expiry time.Duration + Cas Cas + PersistTo uint + ReplicateTo uint + DurabilityLevel DurabilityLevel + StoreSemantic StoreSemantics + Timeout time.Duration + RetryStrategy RetryStrategy + + // Internal: This should never be used and is not supported. + Internal struct { + AccessDeleted bool + } +} + +// MutateIn performs a set of subdocument mutations on the document specified by id. 
+func (c *Collection) MutateIn(id string, ops []MutateInSpec, opts *MutateInOptions) (mutOut *MutateInResult, errOut error) { + if opts == nil { + opts = &MutateInOptions{} + } + + opm := c.newKvOpManager("MutateIn", nil) + defer opm.Finish() + + opm.SetDocumentID(id) + opm.SetRetryStrategy(opts.RetryStrategy) + opm.SetTimeout(opts.Timeout) + + if err := opm.CheckReadyForOp(); err != nil { + return nil, err + } + + return c.internalMutateIn(opm, opts.StoreSemantic, opts.Expiry, opts.Cas, ops, opts.Internal.AccessDeleted) +} + +func jsonMarshalMultiArray(in interface{}) ([]byte, error) { + out, err := json.Marshal(in) + if err != nil { + return nil, err + } + + // Assert first character is a '[' + if len(out) < 2 || out[0] != '[' { + return nil, makeInvalidArgumentsError("not a JSON array") + } + + out = out[1 : len(out)-1] + return out, nil +} + +func jsonMarshalMutateSpec(op MutateInSpec) ([]byte, memd.SubdocFlag, error) { + if op.value == nil { + return nil, memd.SubdocFlagNone, nil + } + + if macro, ok := op.value.(MutationMacro); ok { + return []byte(macro), memd.SubdocFlagExpandMacros | memd.SubdocFlagXattrPath, nil + } + + if op.multiValue { + bytes, err := jsonMarshalMultiArray(op.value) + return bytes, memd.SubdocFlagNone, err + } + + bytes, err := json.Marshal(op.value) + return bytes, memd.SubdocFlagNone, err +} + +func (c *Collection) internalMutateIn( + opm *kvOpManager, + action StoreSemantics, + expiry time.Duration, + cas Cas, + ops []MutateInSpec, + accessDeleted bool, +) (mutOut *MutateInResult, errOut error) { + var docFlags memd.SubdocDocFlag + if action == StoreSemanticsReplace { + // this is the default behaviour + } else if action == StoreSemanticsUpsert { + docFlags |= memd.SubdocDocFlagMkDoc + } else if action == StoreSemanticsInsert { + docFlags |= memd.SubdocDocFlagAddDoc + } else { + return nil, makeInvalidArgumentsError("invalid StoreSemantics value provided") + } + + if accessDeleted { + docFlags |= memd.SubdocDocFlagAccessDeleted + } + 
+ var subdocs []gocbcore.SubDocOp + for _, op := range ops { + if op.path == "" { + switch op.op { + case memd.SubDocOpDictAdd: + return nil, makeInvalidArgumentsError("cannot specify a blank path with InsertSpec") + case memd.SubDocOpDictSet: + return nil, makeInvalidArgumentsError("cannot specify a blank path with UpsertSpec") + case memd.SubDocOpDelete: + return nil, makeInvalidArgumentsError("cannot specify a blank path with DeleteSpec") + case memd.SubDocOpReplace: + op.op = memd.SubDocOpSetDoc + default: + } + } + + etrace := c.startKvOpTrace("encode", opm.TraceSpan()) + bytes, flags, err := jsonMarshalMutateSpec(op) + etrace.Finish() + if err != nil { + return nil, err + } + + if op.createPath { + flags |= memd.SubdocFlagMkDirP + } + + if op.isXattr { + flags |= memd.SubdocFlagXattrPath + } + + subdocs = append(subdocs, gocbcore.SubDocOp{ + Op: op.op, + Flags: flags, + Path: op.path, + Value: bytes, + }) + } + + agent, err := c.getKvProvider() + if err != nil { + return nil, err + } + err = opm.Wait(agent.MutateIn(gocbcore.MutateInOptions{ + Key: opm.DocumentID(), + Flags: docFlags, + Cas: gocbcore.Cas(cas), + Ops: subdocs, + Expiry: durationToExpiry(expiry), + CollectionName: opm.CollectionName(), + ScopeName: opm.ScopeName(), + DurabilityLevel: opm.DurabilityLevel(), + DurabilityLevelTimeout: opm.DurabilityTimeout(), + RetryStrategy: opm.RetryStrategy(), + TraceContext: opm.TraceSpan(), + Deadline: opm.Deadline(), + }, func(res *gocbcore.MutateInResult, err error) { + if err != nil { + errOut = opm.EnhanceErr(err) + opm.Reject() + return + } + + mutOut = &MutateInResult{} + mutOut.cas = Cas(res.Cas) + mutOut.mt = opm.EnhanceMt(res.MutationToken) + mutOut.contents = make([]mutateInPartial, len(res.Ops)) + for i, op := range res.Ops { + mutOut.contents[i] = mutateInPartial{data: op.Value} + } + + opm.Resolve(mutOut.mt) + })) + if err != nil { + errOut = err + } + return +} diff --git a/vendor/github.com/couchbase/gocb/v2/constants.go 
b/vendor/github.com/couchbase/gocb/v2/constants.go new file mode 100644 index 000000000000..41e1aae9370b --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/constants.go @@ -0,0 +1,203 @@ +package gocb + +import ( + gocbcore "github.com/couchbase/gocbcore/v9" + "github.com/couchbase/gocbcore/v9/memd" +) + +const ( + goCbVersionStr = "v2.1.4" +) + +// QueryIndexType provides information on the type of indexer used for an index. +type QueryIndexType string + +const ( + // QueryIndexTypeGsi indicates that GSI was used to build the index. + QueryIndexTypeGsi QueryIndexType = "gsi" + + // QueryIndexTypeView indicates that views were used to build the index. + QueryIndexTypeView QueryIndexType = "views" +) + +// QueryStatus provides information about the current status of a query. +type QueryStatus string + +const ( + // QueryStatusRunning indicates the query is still running + QueryStatusRunning QueryStatus = "running" + + // QueryStatusSuccess indicates the query was successful. + QueryStatusSuccess QueryStatus = "success" + + // QueryStatusErrors indicates a query completed with errors. + QueryStatusErrors QueryStatus = "errors" + + // QueryStatusCompleted indicates a query has completed. + QueryStatusCompleted QueryStatus = "completed" + + // QueryStatusStopped indicates a query has been stopped. + QueryStatusStopped QueryStatus = "stopped" + + // QueryStatusTimeout indicates a query timed out. + QueryStatusTimeout QueryStatus = "timeout" + + // QueryStatusClosed indicates that a query was closed. + QueryStatusClosed QueryStatus = "closed" + + // QueryStatusFatal indicates that a query ended with a fatal error. + QueryStatusFatal QueryStatus = "fatal" + + // QueryStatusAborted indicates that a query was aborted. + QueryStatusAborted QueryStatus = "aborted" + + // QueryStatusUnknown indicates that the query status is unknown. + QueryStatusUnknown QueryStatus = "unknown" +) + +// ServiceType specifies a particular Couchbase service type. 
+type ServiceType gocbcore.ServiceType + +const ( + // ServiceTypeManagement represents a management service. + ServiceTypeManagement ServiceType = ServiceType(gocbcore.MgmtService) + + // ServiceTypeKeyValue represents a memcached service. + ServiceTypeKeyValue ServiceType = ServiceType(gocbcore.MemdService) + + // ServiceTypeViews represents a views service. + ServiceTypeViews ServiceType = ServiceType(gocbcore.CapiService) + + // ServiceTypeQuery represents a query service. + ServiceTypeQuery ServiceType = ServiceType(gocbcore.N1qlService) + + // ServiceTypeSearch represents a full-text-search service. + ServiceTypeSearch ServiceType = ServiceType(gocbcore.FtsService) + + // ServiceTypeAnalytics represents an analytics service. + ServiceTypeAnalytics ServiceType = ServiceType(gocbcore.CbasService) +) + +// QueryProfileMode specifies the profiling mode to use during a query. +type QueryProfileMode string + +const ( + // QueryProfileModeNone disables query profiling + QueryProfileModeNone QueryProfileMode = "off" + + // QueryProfileModePhases includes phase profiling information in the query response + QueryProfileModePhases QueryProfileMode = "phases" + + // QueryProfileModeTimings includes timing profiling information in the query response + QueryProfileModeTimings QueryProfileMode = "timings" +) + +// SubdocFlag provides special handling flags for sub-document operations +type SubdocFlag memd.SubdocFlag + +const ( + // SubdocFlagNone indicates no special behaviours + SubdocFlagNone SubdocFlag = SubdocFlag(memd.SubdocFlagNone) + + // SubdocFlagCreatePath indicates you wish to recursively create the tree of paths + // if it does not already exist within the document. + SubdocFlagCreatePath SubdocFlag = SubdocFlag(memd.SubdocFlagMkDirP) + + // SubdocFlagXattr indicates your path refers to an extended attribute rather than the document. 
+ SubdocFlagXattr SubdocFlag = SubdocFlag(memd.SubdocFlagXattrPath) + + // SubdocFlagUseMacros indicates that you wish macro substitution to occur on the value + SubdocFlagUseMacros SubdocFlag = SubdocFlag(memd.SubdocFlagExpandMacros) +) + +// SubdocDocFlag specifies document-level flags for a sub-document operation. +type SubdocDocFlag memd.SubdocDocFlag + +const ( + // SubdocDocFlagNone indicates no special behaviours + SubdocDocFlagNone SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagNone) + + // SubdocDocFlagMkDoc indicates that the document should be created if it does not already exist. + SubdocDocFlagMkDoc SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagMkDoc) + + // SubdocDocFlagAddDoc indices that the document should be created only if it does not already exist. + SubdocDocFlagAddDoc SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagAddDoc) + + // SubdocDocFlagAccessDeleted indicates that you wish to receive soft-deleted documents. + SubdocDocFlagAccessDeleted SubdocDocFlag = SubdocDocFlag(memd.SubdocDocFlagAccessDeleted) +) + +// DurabilityLevel specifies the level of synchronous replication to use. +type DurabilityLevel uint8 + +const ( + // DurabilityLevelMajority specifies that a mutation must be replicated (held in memory) to a majority of nodes. + DurabilityLevelMajority DurabilityLevel = iota + 1 + + // DurabilityLevelMajorityAndPersistOnMaster specifies that a mutation must be replicated (held in memory) to a + // majority of nodes and also persisted (written to disk) on the active node. + DurabilityLevelMajorityAndPersistOnMaster + + // DurabilityLevelPersistToMajority specifies that a mutation must be persisted (written to disk) to a majority + // of nodes. + DurabilityLevelPersistToMajority +) + +// MutationMacro can be supplied to MutateIn operations to perform ExpandMacros operations. +type MutationMacro string + +const ( + // MutationMacroCAS can be used to tell the server to use the CAS macro. 
+ MutationMacroCAS MutationMacro = "\"${Mutation.CAS}\"" + + // MutationMacroSeqNo can be used to tell the server to use the seqno macro. + MutationMacroSeqNo MutationMacro = "\"${Mutation.seqno}\"" + + // MutationMacroValueCRC32c can be used to tell the server to use the value_crc32c macro. + MutationMacroValueCRC32c MutationMacro = "\"${Mutation.value_crc32c}\"" +) + +// ClusterState specifies the current state of the cluster +type ClusterState uint + +const ( + // ClusterStateOnline indicates that all nodes are online and reachable. + ClusterStateOnline ClusterState = iota + 1 + + // ClusterStateDegraded indicates that all services will function, but possibly not optimally. + ClusterStateDegraded + + // ClusterStateOffline indicates that no nodes were reachable. + ClusterStateOffline +) + +// EndpointState specifies the current state of an endpoint. +type EndpointState uint + +const ( + // EndpointStateDisconnected indicates the endpoint socket is unreachable. + EndpointStateDisconnected EndpointState = iota + 1 + + // EndpointStateConnecting indicates the endpoint socket is connecting. + EndpointStateConnecting + + // EndpointStateConnected indicates the endpoint socket is connected and ready. + EndpointStateConnected + + // EndpointStateDisconnecting indicates the endpoint socket is disconnecting. + EndpointStateDisconnecting +) + +// PingState specifies the result of the ping operation +type PingState uint + +const ( + // PingStateOk indicates that the ping operation was successful. + PingStateOk PingState = iota + 1 + + // PingStateTimeout indicates that the ping operation timed out. + PingStateTimeout + + // PingStateError indicates that the ping operation failed. 
+ PingStateError +) diff --git a/vendor/github.com/couchbase/gocb/v2/constants_str.go b/vendor/github.com/couchbase/gocb/v2/constants_str.go new file mode 100644 index 000000000000..2cbb79077df8 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/constants_str.go @@ -0,0 +1,57 @@ +package gocb + +func serviceTypeToString(service ServiceType) string { + switch service { + case ServiceTypeManagement: + return "mgmt" + case ServiceTypeKeyValue: + return "kv" + case ServiceTypeViews: + return "views" + case ServiceTypeQuery: + return "query" + case ServiceTypeSearch: + return "search" + case ServiceTypeAnalytics: + return "analytics" + } + return "" +} + +func clusterStateToString(state ClusterState) string { + switch state { + case ClusterStateOnline: + return "online" + case ClusterStateDegraded: + return "degraded" + case ClusterStateOffline: + return "offline" + } + return "" +} + +func endpointStateToString(state EndpointState) string { + switch state { + case EndpointStateDisconnected: + return "disconnected" + case EndpointStateConnecting: + return "connecting" + case EndpointStateConnected: + return "connected" + case EndpointStateDisconnecting: + return "disconnecting" + } + return "" +} + +func pingStateToString(state PingState) string { + switch state { + case PingStateOk: + return "ok" + case PingStateTimeout: + return "timeout" + case PingStateError: + return "error" + } + return "" +} diff --git a/vendor/github.com/couchbase/gocb/v2/error.go b/vendor/github.com/couchbase/gocb/v2/error.go new file mode 100644 index 000000000000..39ad409923aa --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error.go @@ -0,0 +1,299 @@ +package gocb + +import ( + "errors" + "fmt" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +type wrappedError struct { + Message string + InnerError error +} + +func (e wrappedError) Error() string { + return fmt.Sprintf("%s: %s", e.Message, e.InnerError.Error()) +} + +func (e wrappedError) Unwrap() error { + return 
e.InnerError +} + +func wrapError(err error, message string) error { + return wrappedError{ + Message: message, + InnerError: err, + } +} + +type invalidArgumentsError struct { + message string +} + +func (e invalidArgumentsError) Error() string { + return fmt.Sprintf("invalid arguments: %s", e.message) +} + +func (e invalidArgumentsError) Unwrap() error { + return ErrInvalidArgument +} + +func makeInvalidArgumentsError(message string) error { + return invalidArgumentsError{ + message: message, + } +} + +// Shared Error Definitions RFC#58@15 +var ( + // ErrTimeout occurs when an operation does not receive a response in a timely manner. + ErrTimeout = gocbcore.ErrTimeout + + // ErrRequestCanceled occurs when an operation has been canceled. + ErrRequestCanceled = gocbcore.ErrRequestCanceled + + // ErrInvalidArgument occurs when an invalid argument is provided for an operation. + ErrInvalidArgument = gocbcore.ErrInvalidArgument + + // ErrServiceNotAvailable occurs when the requested service is not available. + ErrServiceNotAvailable = gocbcore.ErrServiceNotAvailable + + // ErrInternalServerFailure occurs when the server encounters an internal server error. + ErrInternalServerFailure = gocbcore.ErrInternalServerFailure + + // ErrAuthenticationFailure occurs when authentication has failed. + ErrAuthenticationFailure = gocbcore.ErrAuthenticationFailure + + // ErrTemporaryFailure occurs when an operation has failed for a reason that is temporary. + ErrTemporaryFailure = gocbcore.ErrTemporaryFailure + + // ErrParsingFailure occurs when a query has failed to be parsed by the server. + ErrParsingFailure = gocbcore.ErrParsingFailure + + // ErrCasMismatch occurs when an operation has been performed with a cas value that does not the value on the server. + ErrCasMismatch = gocbcore.ErrCasMismatch + + // ErrBucketNotFound occurs when the requested bucket could not be found. 
+ ErrBucketNotFound = gocbcore.ErrBucketNotFound + + // ErrCollectionNotFound occurs when the requested collection could not be found. + ErrCollectionNotFound = gocbcore.ErrCollectionNotFound + + // ErrEncodingFailure occurs when encoding of a value failed. + ErrEncodingFailure = gocbcore.ErrEncodingFailure + + // ErrDecodingFailure occurs when decoding of a value failed. + ErrDecodingFailure = gocbcore.ErrDecodingFailure + + // ErrUnsupportedOperation occurs when an operation that is unsupported or unknown is performed against the server. + ErrUnsupportedOperation = gocbcore.ErrUnsupportedOperation + + // ErrAmbiguousTimeout occurs when an operation does not receive a response in a timely manner for a reason that + // + ErrAmbiguousTimeout = gocbcore.ErrAmbiguousTimeout + + // ErrAmbiguousTimeout occurs when an operation does not receive a response in a timely manner for a reason that + // it can be safely established that + ErrUnambiguousTimeout = gocbcore.ErrUnambiguousTimeout + + // ErrFeatureNotAvailable occurs when an operation is performed on a bucket which does not support it. + ErrFeatureNotAvailable = gocbcore.ErrFeatureNotAvailable + + // ErrScopeNotFound occurs when the requested scope could not be found. + ErrScopeNotFound = gocbcore.ErrScopeNotFound + + // ErrIndexNotFound occurs when the requested index could not be found. + ErrIndexNotFound = gocbcore.ErrIndexNotFound + + // ErrIndexExists occurs when creating an index that already exists. + ErrIndexExists = gocbcore.ErrIndexExists +) + +// Key Value Error Definitions RFC#58@15 +var ( + // ErrDocumentNotFound occurs when the requested document could not be found. + ErrDocumentNotFound = gocbcore.ErrDocumentNotFound + + // ErrDocumentUnretrievable occurs when GetAnyReplica cannot find the document on any replica. + ErrDocumentUnretrievable = gocbcore.ErrDocumentUnretrievable + + // ErrDocumentLocked occurs when a mutation operation is attempted against a document that is locked. 
+ ErrDocumentLocked = gocbcore.ErrDocumentLocked + + // ErrValueTooLarge occurs when a document has gone over the maximum size allowed by the server. + ErrValueTooLarge = gocbcore.ErrValueTooLarge + + // ErrDocumentExists occurs when an attempt is made to insert a document but a document with that key already exists. + ErrDocumentExists = gocbcore.ErrDocumentExists + + // ErrValueNotJSON occurs when a sub-document operation is performed on a + // document which is not JSON. + ErrValueNotJSON = gocbcore.ErrValueNotJSON + + // ErrDurabilityLevelNotAvailable occurs when an invalid durability level was requested. + ErrDurabilityLevelNotAvailable = gocbcore.ErrDurabilityLevelNotAvailable + + // ErrDurabilityImpossible occurs when a request is performed with impossible + // durability level requirements. + ErrDurabilityImpossible = gocbcore.ErrDurabilityImpossible + + // ErrDurabilityAmbiguous occurs when an SyncWrite does not complete in the specified + // time and the result is ambiguous. + ErrDurabilityAmbiguous = gocbcore.ErrDurabilityAmbiguous + + // ErrDurableWriteInProgress occurs when an attempt is made to write to a key that has + // a SyncWrite pending. + ErrDurableWriteInProgress = gocbcore.ErrDurableWriteInProgress + + // ErrDurableWriteReCommitInProgress occurs when an SyncWrite is being recommitted. + ErrDurableWriteReCommitInProgress = gocbcore.ErrDurableWriteReCommitInProgress + + // ErrMutationLost occurs when a mutation was lost. + ErrMutationLost = gocbcore.ErrMutationLost + + // ErrPathNotFound occurs when a sub-document operation targets a path + // which does not exist in the specified document. + ErrPathNotFound = gocbcore.ErrPathNotFound + + // ErrPathMismatch occurs when a sub-document operation specifies a path + // which does not match the document structure (field access on an array). + ErrPathMismatch = gocbcore.ErrPathMismatch + + // ErrPathInvalid occurs when a sub-document path could not be parsed. 
+ ErrPathInvalid = gocbcore.ErrPathInvalid + + // ErrPathTooBig occurs when a sub-document path is too big. + ErrPathTooBig = gocbcore.ErrPathTooBig + + // ErrPathTooDeep occurs when an operation would cause a document to be + // nested beyond the depth limits allowed by the sub-document specification. + ErrPathTooDeep = gocbcore.ErrPathTooDeep + + // ErrValueTooDeep occurs when a sub-document operation specifies a value + // which is deeper than the depth limits of the sub-document specification. + ErrValueTooDeep = gocbcore.ErrValueTooDeep + + // ErrValueInvalid occurs when a sub-document operation could not insert. + ErrValueInvalid = gocbcore.ErrValueInvalid + + // ErrDocumentNotJSON occurs when a sub-document operation is performed on a + // document which is not JSON. + ErrDocumentNotJSON = gocbcore.ErrDocumentNotJSON + + // ErrNumberTooBig occurs when a sub-document operation is performed with + // a bad range. + ErrNumberTooBig = gocbcore.ErrNumberTooBig + + // ErrDeltaInvalid occurs when a sub-document counter operation is performed + // and the specified delta is not valid. + ErrDeltaInvalid = gocbcore.ErrDeltaInvalid + + // ErrPathExists occurs when a sub-document operation expects a path not + // to exists, but the path was found in the document. + ErrPathExists = gocbcore.ErrPathExists + + // ErrXattrUnknownMacro occurs when an invalid macro value is specified. + ErrXattrUnknownMacro = gocbcore.ErrXattrUnknownMacro + + // ErrXattrInvalidFlagCombo occurs when an invalid set of + // extended-attribute flags is passed to a sub-document operation. + ErrXattrInvalidFlagCombo = gocbcore.ErrXattrInvalidFlagCombo + + // ErrXattrInvalidKeyCombo occurs when an invalid set of key operations + // are specified for a extended-attribute sub-document operation. + ErrXattrInvalidKeyCombo = gocbcore.ErrXattrInvalidKeyCombo + + // ErrXattrUnknownVirtualAttribute occurs when an invalid virtual attribute is specified. 
+ ErrXattrUnknownVirtualAttribute = gocbcore.ErrXattrUnknownVirtualAttribute + + // ErrXattrCannotModifyVirtualAttribute occurs when a mutation is attempted upon + // a virtual attribute (which are immutable by definition). + ErrXattrCannotModifyVirtualAttribute = gocbcore.ErrXattrCannotModifyVirtualAttribute + + // ErrXattrInvalidOrder occurs when a set key key operations are specified for a extended-attribute sub-document + // operation in the incorrect order. + ErrXattrInvalidOrder = gocbcore.ErrXattrInvalidOrder +) + +// Query Error Definitions RFC#58@15 +var ( + // ErrPlanningFailure occurs when the query service was unable to create a query plan. + ErrPlanningFailure = gocbcore.ErrPlanningFailure + + // ErrIndexFailure occurs when there was an issue with the index specified. + ErrIndexFailure = gocbcore.ErrIndexFailure + + // ErrPreparedStatementFailure occurs when there was an issue with the prepared statement. + ErrPreparedStatementFailure = gocbcore.ErrPreparedStatementFailure +) + +// Analytics Error Definitions RFC#58@15 +var ( + // ErrCompilationFailure occurs when there was an issue executing the analytics query because it could not + // be compiled. + ErrCompilationFailure = gocbcore.ErrCompilationFailure + + // ErrJobQueueFull occurs when the analytics service job queue is full. + ErrJobQueueFull = gocbcore.ErrJobQueueFull + + // ErrDatasetNotFound occurs when the analytics dataset requested could not be found. + ErrDatasetNotFound = gocbcore.ErrDatasetNotFound + + // ErrDataverseNotFound occurs when the analytics dataverse requested could not be found. + ErrDataverseNotFound = gocbcore.ErrDataverseNotFound + + // ErrDatasetExists occurs when creating an analytics dataset failed because it already exists. + ErrDatasetExists = gocbcore.ErrDatasetExists + + // ErrDataverseExists occurs when creating an analytics dataverse failed because it already exists. 
+ ErrDataverseExists = gocbcore.ErrDataverseExists + + // ErrLinkNotFound occurs when the analytics link requested could not be found. + ErrLinkNotFound = gocbcore.ErrLinkNotFound +) + +// Search Error Definitions RFC#58@15 +var () + +// View Error Definitions RFC#58@15 +var ( + // ErrViewNotFound occurs when the view requested could not be found. + ErrViewNotFound = gocbcore.ErrViewNotFound + + // ErrDesignDocumentNotFound occurs when the design document requested could not be found. + ErrDesignDocumentNotFound = gocbcore.ErrDesignDocumentNotFound +) + +// Management Error Definitions RFC#58@15 +var ( + // ErrCollectionExists occurs when creating a collection failed because it already exists. + ErrCollectionExists = gocbcore.ErrCollectionExists + + // ErrScopeExists occurs when creating a scope failed because it already exists. + ErrScopeExists = gocbcore.ErrScopeExists + + // ErrUserNotFound occurs when the user requested could not be found. + ErrUserNotFound = gocbcore.ErrUserNotFound + + // ErrGroupNotFound occurs when the group requested could not be found. + ErrGroupNotFound = gocbcore.ErrGroupNotFound + + // ErrBucketExists occurs when creating a bucket failed because it already exists. + ErrBucketExists = gocbcore.ErrBucketExists + + // ErrUserExists occurs when creating a user failed because it already exists. + ErrUserExists = gocbcore.ErrUserExists + + // ErrBucketNotFlushable occurs when a bucket could not be flushed because flushing is not enabled. + ErrBucketNotFlushable = gocbcore.ErrBucketNotFlushable +) + +// SDK specific error definitions +var ( + // ErrOverload occurs when too many operations are dispatched and all queues are full. + ErrOverload = gocbcore.ErrOverload + + // ErrNoResult occurs when no results are available to a query. 
+ ErrNoResult = errors.New("no result was available") +) diff --git a/vendor/github.com/couchbase/gocb/v2/error_analytics.go b/vendor/github.com/couchbase/gocb/v2/error_analytics.go new file mode 100644 index 000000000000..7bf4fe1715c3 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error_analytics.go @@ -0,0 +1,42 @@ +package gocb + +import gocbcore "github.com/couchbase/gocbcore/v9" + +// AnalyticsErrorDesc represents a specific error returned from the analytics service. +type AnalyticsErrorDesc struct { + Code uint32 + Message string +} + +func translateCoreAnalyticsErrorDesc(descs []gocbcore.AnalyticsErrorDesc) []AnalyticsErrorDesc { + descsOut := make([]AnalyticsErrorDesc, len(descs)) + for descIdx, desc := range descs { + descsOut[descIdx] = AnalyticsErrorDesc{ + Code: desc.Code, + Message: desc.Message, + } + } + return descsOut +} + +// AnalyticsError is the error type of all analytics query errors. +// UNCOMMITTED: This API may change in the future. +type AnalyticsError struct { + InnerError error `json:"-"` + Statement string `json:"statement,omitempty"` + ClientContextID string `json:"client_context_id,omitempty"` + Errors []AnalyticsErrorDesc `json:"errors,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. +func (e AnalyticsError) Error() string { + return e.InnerError.Error() + " | " + serializeWrappedError(e) +} + +// Unwrap returns the underlying cause for this error. 
+func (e AnalyticsError) Unwrap() error { + return e.InnerError +} diff --git a/vendor/github.com/couchbase/gocb/v2/error_http.go b/vendor/github.com/couchbase/gocb/v2/error_http.go new file mode 100644 index 000000000000..5b0716df4ba7 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error_http.go @@ -0,0 +1,72 @@ +package gocb + +import ( + gocbcore "github.com/couchbase/gocbcore/v9" + "github.com/pkg/errors" +) + +// HTTPError is the error type of management HTTP errors. +// UNCOMMITTED: This API may change in the future. +type HTTPError struct { + InnerError error `json:"-"` + UniqueID string `json:"unique_id,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. +func (e HTTPError) Error() string { + return e.InnerError.Error() + " | " + serializeWrappedError(e) +} + +// Unwrap returns the underlying cause for this error. 
+func (e HTTPError) Unwrap() error { + return e.InnerError +} + +func makeGenericHTTPError(baseErr error, req *gocbcore.HTTPRequest, resp *gocbcore.HTTPResponse) error { + if baseErr == nil { + logErrorf("makeGenericHTTPError got an empty error") + baseErr = errors.New("unknown error") + } + + err := HTTPError{ + InnerError: baseErr, + } + + if req != nil { + err.UniqueID = req.UniqueID + } + + if resp != nil { + err.Endpoint = resp.Endpoint + } + + return err +} + +func makeGenericMgmtError(baseErr error, req *mgmtRequest, resp *mgmtResponse) error { + if baseErr == nil { + logErrorf("makeGenericMgmtError got an empty error") + baseErr = errors.New("unknown error") + } + + err := HTTPError{ + InnerError: baseErr, + } + + if req != nil { + err.UniqueID = req.UniqueID + } + + if resp != nil { + err.Endpoint = resp.Endpoint + } + + return err +} + +func makeMgmtBadStatusError(message string, req *mgmtRequest, resp *mgmtResponse) error { + return makeGenericMgmtError(errors.New(message), req, resp) +} diff --git a/vendor/github.com/couchbase/gocb/v2/error_keyvalue.go b/vendor/github.com/couchbase/gocb/v2/error_keyvalue.go new file mode 100644 index 000000000000..d2c7cb2009c6 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error_keyvalue.go @@ -0,0 +1,34 @@ +package gocb + +import "github.com/couchbase/gocbcore/v9/memd" + +// KeyValueError wraps key-value errors that occur within the SDK. +// UNCOMMITTED: This API may change in the future. 
+type KeyValueError struct { + InnerError error `json:"-"` + StatusCode memd.StatusCode `json:"status_code,omitempty"` + BucketName string `json:"bucket,omitempty"` + ScopeName string `json:"scope,omitempty"` + CollectionName string `json:"collection,omitempty"` + CollectionID uint32 `json:"collection_id,omitempty"` + ErrorName string `json:"error_name,omitempty"` + ErrorDescription string `json:"error_description,omitempty"` + Opaque uint32 `json:"opaque,omitempty"` + Context string `json:"context,omitempty"` + Ref string `json:"ref,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` + LastDispatchedTo string `json:"last_dispatched_to,omitempty"` + LastDispatchedFrom string `json:"last_dispatched_from,omitempty"` + LastConnectionID string `json:"last_connection_id,omitempty"` +} + +// Error returns the string representation of a kv error. +func (e KeyValueError) Error() string { + return e.InnerError.Error() + " | " + serializeWrappedError(e) +} + +// Unwrap returns the underlying reason for the error +func (e KeyValueError) Unwrap() error { + return e.InnerError +} diff --git a/vendor/github.com/couchbase/gocb/v2/error_query.go b/vendor/github.com/couchbase/gocb/v2/error_query.go new file mode 100644 index 000000000000..ba205c85a424 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error_query.go @@ -0,0 +1,42 @@ +package gocb + +import gocbcore "github.com/couchbase/gocbcore/v9" + +// QueryErrorDesc represents a specific error returned from the query service. +type QueryErrorDesc struct { + Code uint32 + Message string +} + +func translateCoreQueryErrorDesc(descs []gocbcore.N1QLErrorDesc) []QueryErrorDesc { + descsOut := make([]QueryErrorDesc, len(descs)) + for descIdx, desc := range descs { + descsOut[descIdx] = QueryErrorDesc{ + Code: desc.Code, + Message: desc.Message, + } + } + return descsOut +} + +// QueryError is the error type of all query errors. 
+// UNCOMMITTED: This API may change in the future. +type QueryError struct { + InnerError error `json:"-"` + Statement string `json:"statement,omitempty"` + ClientContextID string `json:"client_context_id,omitempty"` + Errors []QueryErrorDesc `json:"errors,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. +func (e QueryError) Error() string { + return e.InnerError.Error() + " | " + serializeWrappedError(e) +} + +// Unwrap returns the underlying cause for this error. +func (e QueryError) Unwrap() error { + return e.InnerError +} diff --git a/vendor/github.com/couchbase/gocb/v2/error_search.go b/vendor/github.com/couchbase/gocb/v2/error_search.go new file mode 100644 index 000000000000..bfcb5a2802c3 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error_search.go @@ -0,0 +1,23 @@ +package gocb + +// SearchError is the error type of all search query errors. +// UNCOMMITTED: This API may change in the future. +type SearchError struct { + InnerError error `json:"-"` + Query interface{} `json:"query,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` + ErrorText string `json:"error_text"` + IndexName string `json:"index_name,omitempty"` +} + +// Error returns the string representation of this error. +func (e SearchError) Error() string { + return e.InnerError.Error() + " | " + serializeWrappedError(e) +} + +// Unwrap returns the underlying cause for this error. 
+func (e SearchError) Unwrap() error { + return e.InnerError +} diff --git a/vendor/github.com/couchbase/gocb/v2/error_timeout.go b/vendor/github.com/couchbase/gocb/v2/error_timeout.go new file mode 100644 index 000000000000..dccdcc48e19e --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error_timeout.go @@ -0,0 +1,87 @@ +package gocb + +import ( + "encoding/json" + "time" +) + +// TimeoutError wraps timeout errors that occur within the SDK. +// UNCOMMITTED: This API may change in the future. +type TimeoutError struct { + InnerError error + OperationID string + Opaque string + TimeObserved time.Duration + RetryReasons []RetryReason + RetryAttempts uint32 + LastDispatchedTo string + LastDispatchedFrom string + LastConnectionID string +} + +type timeoutError struct { + InnerError error `json:"-"` + OperationID string `json:"s,omitempty"` + Opaque string `json:"i,omitempty"` + TimeObserved uint64 `json:"t,omitempty"` + RetryReasons []string `json:"rr,omitempty"` + RetryAttempts uint32 `json:"ra,omitempty"` + LastDispatchedTo string `json:"r,omitempty"` + LastDispatchedFrom string `json:"l,omitempty"` + LastConnectionID string `json:"c,omitempty"` +} + +// MarshalJSON implements the Marshaler interface. +func (err *TimeoutError) MarshalJSON() ([]byte, error) { + var retries []string + for _, rr := range err.RetryReasons { + retries = append(retries, rr.Description()) + } + + toMarshal := timeoutError{ + InnerError: err.InnerError, + OperationID: err.OperationID, + Opaque: err.Opaque, + TimeObserved: uint64(err.TimeObserved / time.Microsecond), + RetryReasons: retries, + RetryAttempts: err.RetryAttempts, + LastDispatchedTo: err.LastDispatchedTo, + LastDispatchedFrom: err.LastDispatchedFrom, + LastConnectionID: err.LastConnectionID, + } + + return json.Marshal(toMarshal) +} + +// UnmarshalJSON implements the Unmarshaler interface. 
+func (err *TimeoutError) UnmarshalJSON(data []byte) error { + var tErr *timeoutError + if err := json.Unmarshal(data, &tErr); err != nil { + return err + } + + duration := time.Duration(tErr.TimeObserved) * time.Microsecond + + // Note that we cannot reasonably unmarshal the retry reasons + err.OperationID = tErr.OperationID + err.Opaque = tErr.Opaque + err.TimeObserved = duration + err.RetryAttempts = tErr.RetryAttempts + err.LastDispatchedTo = tErr.LastDispatchedTo + err.LastDispatchedFrom = tErr.LastDispatchedFrom + err.LastConnectionID = tErr.LastConnectionID + + return nil +} + +func (err TimeoutError) Error() string { + if err.InnerError == nil { + return serializeWrappedError(err) + } + return err.InnerError.Error() + " | " + serializeWrappedError(err) +} + +// Unwrap returns the underlying reason for the error +func (err TimeoutError) Unwrap() error { + return err.InnerError +} diff --git a/vendor/github.com/couchbase/gocb/v2/error_view.go b/vendor/github.com/couchbase/gocb/v2/error_view.go new file mode 100644 index 000000000000..7c7226aed935 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error_view.go @@ -0,0 +1,42 @@ +package gocb + +import gocbcore "github.com/couchbase/gocbcore/v9" + +// ViewErrorDesc represents a specific error returned from the views service. +type ViewErrorDesc struct { + SourceNode string + Message string +} + +func translateCoreViewErrorDesc(descs []gocbcore.ViewQueryErrorDesc) []ViewErrorDesc { + descsOut := make([]ViewErrorDesc, len(descs)) + for descIdx, desc := range descs { + descsOut[descIdx] = ViewErrorDesc{ + SourceNode: desc.SourceNode, + Message: desc.Message, + } + } + return descsOut +} + +// ViewError is the error type of all view query errors. +// UNCOMMITTED: This API may change in the future. 
+type ViewError struct { + InnerError error `json:"-"` + DesignDocumentName string `json:"design_document_name,omitempty"` + ViewName string `json:"view_name,omitempty"` + Errors []ViewErrorDesc `json:"errors,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. +func (e ViewError) Error() string { + return e.InnerError.Error() + " | " + serializeWrappedError(e) +} + +// Unwrap returns the underlying cause for this error. +func (e ViewError) Unwrap() error { + return e.InnerError +} diff --git a/vendor/github.com/couchbase/gocb/v2/error_wrapping.go b/vendor/github.com/couchbase/gocb/v2/error_wrapping.go new file mode 100644 index 000000000000..6f8c56c4b153 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/error_wrapping.go @@ -0,0 +1,130 @@ +package gocb + +import ( + "encoding/json" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +func serializeWrappedError(err error) string { + errBytes, serErr := json.Marshal(err) + if serErr != nil { + logErrorf("failed to serialize error to json: %s", serErr.Error()) + } + return string(errBytes) +} + +func maybeEnhanceCoreErr(err error) error { + if kvErr, ok := err.(*gocbcore.KeyValueError); ok { + return &KeyValueError{ + InnerError: kvErr.InnerError, + StatusCode: kvErr.StatusCode, + BucketName: kvErr.BucketName, + ScopeName: kvErr.ScopeName, + CollectionName: kvErr.CollectionName, + CollectionID: kvErr.CollectionID, + ErrorName: kvErr.ErrorName, + ErrorDescription: kvErr.ErrorDescription, + Opaque: kvErr.Opaque, + Context: kvErr.Context, + Ref: kvErr.Ref, + RetryReasons: translateCoreRetryReasons(kvErr.RetryReasons), + RetryAttempts: kvErr.RetryAttempts, + LastDispatchedTo: kvErr.LastDispatchedTo, + LastDispatchedFrom: kvErr.LastDispatchedFrom, + LastConnectionID: kvErr.LastConnectionID, + } + } + if viewErr, ok := 
err.(*gocbcore.ViewError); ok { + return &ViewError{ + InnerError: viewErr.InnerError, + DesignDocumentName: viewErr.DesignDocumentName, + ViewName: viewErr.ViewName, + Errors: translateCoreViewErrorDesc(viewErr.Errors), + Endpoint: viewErr.Endpoint, + RetryReasons: translateCoreRetryReasons(viewErr.RetryReasons), + RetryAttempts: viewErr.RetryAttempts, + } + } + if queryErr, ok := err.(*gocbcore.N1QLError); ok { + return &QueryError{ + InnerError: queryErr.InnerError, + Statement: queryErr.Statement, + ClientContextID: queryErr.ClientContextID, + Errors: translateCoreQueryErrorDesc(queryErr.Errors), + Endpoint: queryErr.Endpoint, + RetryReasons: translateCoreRetryReasons(queryErr.RetryReasons), + RetryAttempts: queryErr.RetryAttempts, + } + } + if analyticsErr, ok := err.(*gocbcore.AnalyticsError); ok { + return &AnalyticsError{ + InnerError: analyticsErr.InnerError, + Statement: analyticsErr.Statement, + ClientContextID: analyticsErr.ClientContextID, + Errors: translateCoreAnalyticsErrorDesc(analyticsErr.Errors), + Endpoint: analyticsErr.Endpoint, + RetryReasons: translateCoreRetryReasons(analyticsErr.RetryReasons), + RetryAttempts: analyticsErr.RetryAttempts, + } + } + if searchErr, ok := err.(*gocbcore.SearchError); ok { + return &SearchError{ + InnerError: searchErr.InnerError, + Query: searchErr.Query, + Endpoint: searchErr.Endpoint, + RetryReasons: translateCoreRetryReasons(searchErr.RetryReasons), + RetryAttempts: searchErr.RetryAttempts, + ErrorText: searchErr.ErrorText, + IndexName: searchErr.IndexName, + } + } + if httpErr, ok := err.(*gocbcore.HTTPError); ok { + return &HTTPError{ + InnerError: httpErr.InnerError, + UniqueID: httpErr.UniqueID, + Endpoint: httpErr.Endpoint, + RetryReasons: translateCoreRetryReasons(httpErr.RetryReasons), + RetryAttempts: httpErr.RetryAttempts, + } + } + + if timeoutErr, ok := err.(*gocbcore.TimeoutError); ok { + return &TimeoutError{ + InnerError: timeoutErr.InnerError, + OperationID: timeoutErr.OperationID, + Opaque: 
timeoutErr.Opaque, + TimeObserved: timeoutErr.TimeObserved, + RetryReasons: translateCoreRetryReasons(timeoutErr.RetryReasons), + RetryAttempts: timeoutErr.RetryAttempts, + LastDispatchedTo: timeoutErr.LastDispatchedTo, + LastDispatchedFrom: timeoutErr.LastDispatchedFrom, + LastConnectionID: timeoutErr.LastConnectionID, + } + } + return err +} + +func maybeEnhanceKVErr(err error, bucketName, scopeName, collName, docKey string) error { + return maybeEnhanceCoreErr(err) +} + +func maybeEnhanceCollKVErr(err error, bucket kvProvider, coll *Collection, docKey string) error { + return maybeEnhanceKVErr(err, coll.bucketName(), coll.Name(), coll.ScopeName(), docKey) +} + +func maybeEnhanceViewError(err error) error { + return maybeEnhanceCoreErr(err) +} + +func maybeEnhanceQueryError(err error) error { + return maybeEnhanceCoreErr(err) +} + +func maybeEnhanceAnalyticsError(err error) error { + return maybeEnhanceCoreErr(err) +} + +func maybeEnhanceSearchError(err error) error { + return maybeEnhanceCoreErr(err) +} diff --git a/vendor/github.com/couchbase/gocb/v2/go.mod b/vendor/github.com/couchbase/gocb/v2/go.mod new file mode 100644 index 000000000000..fe10769104dc --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/go.mod @@ -0,0 +1,12 @@ +module github.com/couchbase/gocb/v2 + +require ( + github.com/couchbase/gocbcore/v9 v9.0.4 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/google/uuid v1.1.1 + github.com/pkg/errors v0.9.1 + github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/testify v1.5.1 +) + +go 1.13 diff --git a/vendor/github.com/couchbase/gocb/v2/go.sum b/vendor/github.com/couchbase/gocb/v2/go.sum new file mode 100644 index 000000000000..5abea6757694 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/go.sum @@ -0,0 +1,24 @@ +github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8= +github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus= 
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/couchbase/gocb/v2/kvopmanager.go 
b/vendor/github.com/couchbase/gocb/v2/kvopmanager.go new file mode 100644 index 000000000000..04a9d913c780 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/kvopmanager.go @@ -0,0 +1,273 @@ +package gocb + +import ( + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" + "github.com/couchbase/gocbcore/v9/memd" + + "github.com/pkg/errors" +) + +type kvOpManager struct { + parent *Collection + signal chan struct{} + + err error + wasResolved bool + mutationToken *MutationToken + + span requestSpan + documentID string + transcoder Transcoder + timeout time.Duration + deadline time.Time + bytes []byte + flags uint32 + persistTo uint + replicateTo uint + durabilityLevel DurabilityLevel + retryStrategy *retryStrategyWrapper + cancelCh chan struct{} +} + +func (m *kvOpManager) getTimeout() time.Duration { + if m.timeout > 0 { + return m.timeout + } + + defaultTimeout := m.parent.timeoutsConfig.KVTimeout + if m.durabilityLevel > DurabilityLevelMajority || m.persistTo > 0 { + defaultTimeout = m.parent.timeoutsConfig.KVDurableTimeout + } + + return defaultTimeout +} + +func (m *kvOpManager) SetDocumentID(id string) { + m.documentID = id +} + +func (m *kvOpManager) SetCancelCh(cancelCh chan struct{}) { + m.cancelCh = cancelCh +} + +func (m *kvOpManager) SetTimeout(timeout time.Duration) { + m.timeout = timeout +} + +func (m *kvOpManager) SetTranscoder(transcoder Transcoder) { + if transcoder == nil { + transcoder = m.parent.transcoder + } + m.transcoder = transcoder +} + +func (m *kvOpManager) SetValue(val interface{}) { + if m.err != nil { + return + } + if m.transcoder == nil { + m.err = errors.New("Expected a transcoder to be specified first") + return + } + + espan := m.parent.startKvOpTrace("encode", m.span) + defer espan.Finish() + + bytes, flags, err := m.transcoder.Encode(val) + if err != nil { + m.err = err + return + } + + m.bytes = bytes + m.flags = flags +} + +func (m *kvOpManager) SetDuraOptions(persistTo, replicateTo uint, level DurabilityLevel) { + if 
persistTo != 0 || replicateTo != 0 { + if !m.parent.useMutationTokens { + m.err = makeInvalidArgumentsError("cannot use observe based durability without mutation tokens") + return + } + + if level > 0 { + m.err = makeInvalidArgumentsError("cannot mix observe based durability and synchronous durability") + return + } + } + + m.persistTo = persistTo + m.replicateTo = replicateTo + m.durabilityLevel = level +} + +func (m *kvOpManager) SetRetryStrategy(retryStrategy RetryStrategy) { + wrapper := m.parent.retryStrategyWrapper + if retryStrategy != nil { + wrapper = newRetryStrategyWrapper(retryStrategy) + } + m.retryStrategy = wrapper +} + +func (m *kvOpManager) Finish() { + m.span.Finish() +} + +func (m *kvOpManager) TraceSpan() requestSpan { + return m.span +} + +func (m *kvOpManager) DocumentID() []byte { + return []byte(m.documentID) +} + +func (m *kvOpManager) CollectionName() string { + return m.parent.name() +} + +func (m *kvOpManager) ScopeName() string { + return m.parent.ScopeName() +} + +func (m *kvOpManager) BucketName() string { + return m.parent.bucketName() +} + +func (m *kvOpManager) ValueBytes() []byte { + return m.bytes +} + +func (m *kvOpManager) ValueFlags() uint32 { + return m.flags +} + +func (m *kvOpManager) Transcoder() Transcoder { + return m.transcoder +} + +func (m *kvOpManager) DurabilityLevel() memd.DurabilityLevel { + return memd.DurabilityLevel(m.durabilityLevel) +} + +func (m *kvOpManager) DurabilityTimeout() time.Duration { + timeout := m.getTimeout() + duraTimeout := timeout * 10 / 9 + return duraTimeout +} + +func (m *kvOpManager) Deadline() time.Time { + if m.deadline.IsZero() { + timeout := m.getTimeout() + m.deadline = time.Now().Add(timeout) + } + + return m.deadline +} + +func (m *kvOpManager) RetryStrategy() *retryStrategyWrapper { + return m.retryStrategy +} + +func (m *kvOpManager) CheckReadyForOp() error { + if m.err != nil { + return m.err + } + + if m.getTimeout() == 0 { + return errors.New("op manager had no timeout 
specified") + } + + return nil +} + +func (m *kvOpManager) NeedsObserve() bool { + return m.persistTo > 0 || m.replicateTo > 0 +} + +func (m *kvOpManager) EnhanceErr(err error) error { + return maybeEnhanceCollKVErr(err, nil, m.parent, m.documentID) +} + +func (m *kvOpManager) EnhanceMt(token gocbcore.MutationToken) *MutationToken { + if token.VbUUID != 0 { + return &MutationToken{ + token: token, + bucketName: m.BucketName(), + } + } + + return nil +} + +func (m *kvOpManager) Reject() { + m.signal <- struct{}{} +} + +func (m *kvOpManager) Resolve(token *MutationToken) { + m.wasResolved = true + m.mutationToken = token + m.signal <- struct{}{} +} + +func (m *kvOpManager) Wait(op gocbcore.PendingOp, err error) error { + if err != nil { + return err + } + if m.err != nil { + op.Cancel() + } + + select { + case <-m.signal: + // Good to go + case <-m.cancelCh: + op.Cancel() + <-m.signal + } + + if m.wasResolved && (m.persistTo > 0 || m.replicateTo > 0) { + if m.mutationToken == nil { + return errors.New("expected a mutation token") + } + + return m.parent.waitForDurability( + m.span, + m.documentID, + m.mutationToken.token, + m.replicateTo, + m.persistTo, + m.Deadline(), + m.cancelCh, + ) + } + + return nil +} + +func (c *Collection) newKvOpManager(opName string, tracectx requestSpanContext) *kvOpManager { + span := c.startKvOpTrace(opName, tracectx) + + return &kvOpManager{ + parent: c, + signal: make(chan struct{}, 1), + span: span, + } +} + +func durationToExpiry(dura time.Duration) uint32 { + // If the duration is 0, that indicates never-expires + if dura == 0 { + return 0 + } + + // If the duration is less than one second, we must force the + // value to 1 to avoid accidentally making it never expire. + if dura < 1*time.Second { + return 1 + } + + // Translate into a uint32 in seconds. 
+ return uint32(dura / time.Second) +} diff --git a/vendor/github.com/couchbase/gocb/v2/logging.go b/vendor/github.com/couchbase/gocb/v2/logging.go new file mode 100644 index 000000000000..d9faae5257ec --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/logging.go @@ -0,0 +1,148 @@ +package gocb + +import ( + "fmt" + "log" + "strings" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +// LogLevel specifies the severity of a log message. +type LogLevel gocbcore.LogLevel + +// Various logging levels (or subsystems) which can categorize the message. +// Currently these are ordered in decreasing severity. +const ( + LogError LogLevel = LogLevel(gocbcore.LogError) + LogWarn LogLevel = LogLevel(gocbcore.LogWarn) + LogInfo LogLevel = LogLevel(gocbcore.LogInfo) + LogDebug LogLevel = LogLevel(gocbcore.LogDebug) + LogTrace LogLevel = LogLevel(gocbcore.LogTrace) + LogSched LogLevel = LogLevel(gocbcore.LogSched) + LogMaxVerbosity LogLevel = LogLevel(gocbcore.LogMaxVerbosity) +) + +// LogRedactLevel specifies the degree with which to redact the logs. +type LogRedactLevel uint + +const ( + // RedactNone indicates to perform no redactions + RedactNone LogRedactLevel = iota + + // RedactPartial indicates to redact all possible user-identifying information from logs. + RedactPartial + + // RedactFull indicates to fully redact all possible identifying information from logs. + RedactFull +) + +// SetLogRedactionLevel specifies the level with which logs should be redacted. +func SetLogRedactionLevel(level LogRedactLevel) { + globalLogRedactionLevel = level + gocbcore.SetLogRedactionLevel(gocbcore.LogRedactLevel(level)) +} + +// Logger defines a logging interface. You can either use one of the default loggers +// (DefaultStdioLogger(), VerboseStdioLogger()) or implement your own. +type Logger interface { + // Outputs logging information: + // level is the verbosity level + // offset is the position within the calling stack from which the message + // originated. 
This is useful for contextual loggers which retrieve file/line + // information. + Log(level LogLevel, offset int, format string, v ...interface{}) error +} + +var ( + globalLogger Logger + globalLogRedactionLevel LogRedactLevel +) + +type coreLogWrapper struct { + wrapped gocbcore.Logger +} + +func (wrapper coreLogWrapper) Log(level LogLevel, offset int, format string, v ...interface{}) error { + return wrapper.wrapped.Log(gocbcore.LogLevel(level), offset+2, format, v...) +} + +// DefaultStdioLogger gets the default standard I/O logger. +// gocb.SetLogger(gocb.DefaultStdioLogger()) +func DefaultStdioLogger() Logger { + return &coreLogWrapper{ + wrapped: gocbcore.DefaultStdioLogger(), + } +} + +// VerboseStdioLogger is a more verbose level of DefaultStdioLogger(). Messages +// pertaining to the scheduling of ordinary commands (and their responses) will +// also be emitted. +// gocb.SetLogger(gocb.VerboseStdioLogger()) +func VerboseStdioLogger() Logger { + return coreLogWrapper{ + wrapped: gocbcore.VerboseStdioLogger(), + } +} + +type coreLogger struct { + wrapped Logger +} + +func (wrapper coreLogger) Log(level gocbcore.LogLevel, offset int, format string, v ...interface{}) error { + return wrapper.wrapped.Log(LogLevel(level), offset+2, format, v...) +} + +func getCoreLogger(logger Logger) gocbcore.Logger { + typedLogger, isCoreLogger := logger.(*coreLogWrapper) + if isCoreLogger { + return typedLogger.wrapped + } + + return &coreLogger{ + wrapped: logger, + } +} + +// SetLogger sets a logger to be used by the library. A logger can be obtained via +// the DefaultStdioLogger() or VerboseStdioLogger() functions. You can also implement +// your own logger using the Logger interface. 
+func SetLogger(logger Logger) { + globalLogger = logger + gocbcore.SetLogger(getCoreLogger(logger)) + // gocbcore.SetLogRedactionLevel(gocbcore.LogRedactLevel(globalLogRedactionLevel)) +} + +func logExf(level LogLevel, offset int, format string, v ...interface{}) { + if globalLogger != nil { + err := globalLogger.Log(level, offset+1, format, v...) + if err != nil { + log.Printf("Logger error occurred (%s)\n", err) + } + } +} + +func logInfof(format string, v ...interface{}) { + logExf(LogInfo, 1, format, v...) +} + +func logDebugf(format string, v ...interface{}) { + logExf(LogDebug, 1, format, v...) +} + +func logSchedf(format string, v ...interface{}) { + logExf(LogSched, 1, format, v...) +} + +func logWarnf(format string, v ...interface{}) { + logExf(LogWarn, 1, format, v...) +} + +func logErrorf(format string, v ...interface{}) { + logExf(LogError, 1, format, v...) +} + +func reindentLog(indent, message string) string { + reindentedMessage := strings.Replace(message, "\n", "\n"+indent, -1) + return fmt.Sprintf("%s%s", indent, reindentedMessage) +} diff --git a/vendor/github.com/couchbase/gocb/v2/mgmt_http.go b/vendor/github.com/couchbase/gocb/v2/mgmt_http.go new file mode 100644 index 000000000000..8f0d424eb171 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/mgmt_http.go @@ -0,0 +1,126 @@ +package gocb + +import ( + "io" + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +type mgmtRequest struct { + Service ServiceType + Method string + Path string + Body []byte + Headers map[string]string + ContentType string + IsIdempotent bool + UniqueID string + + Timeout time.Duration + RetryStrategy RetryStrategy + + parentSpan requestSpanContext +} + +type mgmtResponse struct { + Endpoint string + StatusCode uint32 + Body io.ReadCloser +} + +type mgmtProvider interface { + executeMgmtRequest(req mgmtRequest) (*mgmtResponse, error) +} + +func (c *Cluster) executeMgmtRequest(req mgmtRequest) (mgmtRespOut *mgmtResponse, errOut error) { + timeout := 
req.Timeout + if timeout == 0 { + timeout = c.timeoutsConfig.ManagementTimeout + } + + provider, err := c.getHTTPProvider() + if err != nil { + return nil, err + } + + retryStrategy := c.retryStrategyWrapper + if req.RetryStrategy != nil { + retryStrategy = newRetryStrategyWrapper(req.RetryStrategy) + } + + corereq := &gocbcore.HTTPRequest{ + Service: gocbcore.ServiceType(req.Service), + Method: req.Method, + Path: req.Path, + Body: req.Body, + Headers: req.Headers, + ContentType: req.ContentType, + IsIdempotent: req.IsIdempotent, + UniqueID: req.UniqueID, + Deadline: time.Now().Add(timeout), + RetryStrategy: retryStrategy, + TraceContext: req.parentSpan, + } + + coreresp, err := provider.DoHTTPRequest(corereq) + if err != nil { + return nil, makeGenericHTTPError(err, corereq, coreresp) + } + + resp := &mgmtResponse{ + Endpoint: coreresp.Endpoint, + StatusCode: uint32(coreresp.StatusCode), + Body: coreresp.Body, + } + return resp, nil +} + +func (b *Bucket) executeMgmtRequest(req mgmtRequest) (mgmtRespOut *mgmtResponse, errOut error) { + timeout := req.Timeout + if timeout == 0 { + timeout = b.timeoutsConfig.ManagementTimeout + } + + provider, err := b.connectionManager.getHTTPProvider() + if err != nil { + return nil, err + } + + retryStrategy := b.retryStrategyWrapper + if req.RetryStrategy != nil { + retryStrategy = newRetryStrategyWrapper(req.RetryStrategy) + } + + corereq := &gocbcore.HTTPRequest{ + Service: gocbcore.ServiceType(req.Service), + Method: req.Method, + Path: req.Path, + Body: req.Body, + Headers: req.Headers, + ContentType: req.ContentType, + IsIdempotent: req.IsIdempotent, + UniqueID: req.UniqueID, + Deadline: time.Now().Add(timeout), + RetryStrategy: retryStrategy, + } + + coreresp, err := provider.DoHTTPRequest(corereq) + if err != nil { + return nil, makeGenericHTTPError(err, corereq, coreresp) + } + + resp := &mgmtResponse{ + Endpoint: coreresp.Endpoint, + StatusCode: uint32(coreresp.StatusCode), + Body: coreresp.Body, + } + return resp, nil 
+} + +func ensureBodyClosed(body io.ReadCloser) { + err := body.Close() + if err != nil { + logDebugf("Failed to close socket: %v", err) + } +} diff --git a/vendor/github.com/couchbase/gocb/v2/providers.go b/vendor/github.com/couchbase/gocb/v2/providers.go new file mode 100644 index 000000000000..66e6468bf54c --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/providers.go @@ -0,0 +1,230 @@ +package gocb + +import ( + "time" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +type httpProvider interface { + DoHTTPRequest(req *gocbcore.HTTPRequest) (*gocbcore.HTTPResponse, error) +} + +type viewProvider interface { + ViewQuery(opts gocbcore.ViewQueryOptions) (viewRowReader, error) +} + +type queryProvider interface { + N1QLQuery(opts gocbcore.N1QLQueryOptions) (queryRowReader, error) + PreparedN1QLQuery(opts gocbcore.N1QLQueryOptions) (queryRowReader, error) +} + +type analyticsProvider interface { + AnalyticsQuery(opts gocbcore.AnalyticsQueryOptions) (analyticsRowReader, error) +} + +type searchProvider interface { + SearchQuery(opts gocbcore.SearchQueryOptions) (searchRowReader, error) +} + +type waitUntilReadyProvider interface { + WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions) error +} + +type gocbcoreWaitUntilReadyProvider interface { + WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions, + cb gocbcore.WaitUntilReadyCallback) (gocbcore.PendingOp, error) +} + +type diagnosticsProvider interface { + Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error) + Ping(opts gocbcore.PingOptions) (*gocbcore.PingResult, error) +} + +type gocbcoreDiagnosticsProvider interface { + Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error) + Ping(opts gocbcore.PingOptions, cb gocbcore.PingCallback) (gocbcore.PendingOp, error) +} + +type waitUntilReadyProviderWrapper struct { + provider gocbcoreWaitUntilReadyProvider +} + +func (wpw *waitUntilReadyProviderWrapper) 
WaitUntilReady(deadline time.Time, opts gocbcore.WaitUntilReadyOptions) (errOut error) { + opm := newAsyncOpManager() + err := opm.Wait(wpw.provider.WaitUntilReady(deadline, opts, func(res *gocbcore.WaitUntilReadyResult, err error) { + if err != nil { + errOut = err + opm.Reject() + return + } + + opm.Resolve() + })) + if err != nil { + errOut = err + } + + return +} + +type diagnosticsProviderWrapper struct { + provider gocbcoreDiagnosticsProvider +} + +func (dpw *diagnosticsProviderWrapper) Diagnostics(opts gocbcore.DiagnosticsOptions) (*gocbcore.DiagnosticInfo, error) { + return dpw.provider.Diagnostics(opts) +} + +func (dpw *diagnosticsProviderWrapper) Ping(opts gocbcore.PingOptions) (pOut *gocbcore.PingResult, errOut error) { + opm := newAsyncOpManager() + err := opm.Wait(dpw.provider.Ping(opts, func(res *gocbcore.PingResult, err error) { + if err != nil { + errOut = err + opm.Reject() + return + } + + pOut = res + opm.Resolve() + })) + if err != nil { + errOut = err + } + + return +} + +type httpProviderWrapper struct { + provider *gocbcore.AgentGroup +} + +func (hpw *httpProviderWrapper) DoHTTPRequest(req *gocbcore.HTTPRequest) (respOut *gocbcore.HTTPResponse, errOut error) { + opm := newAsyncOpManager() + err := opm.Wait(hpw.provider.DoHTTPRequest(req, func(res *gocbcore.HTTPResponse, err error) { + if err != nil { + errOut = err + opm.Reject() + return + } + + respOut = res + opm.Resolve() + })) + if err != nil { + errOut = err + } + + return +} + +type analyticsProviderWrapper struct { + provider *gocbcore.AgentGroup +} + +func (apw *analyticsProviderWrapper) AnalyticsQuery(opts gocbcore.AnalyticsQueryOptions) (aOut analyticsRowReader, errOut error) { + opm := newAsyncOpManager() + err := opm.Wait(apw.provider.AnalyticsQuery(opts, func(reader *gocbcore.AnalyticsRowReader, err error) { + if err != nil { + errOut = err + opm.Reject() + return + } + + aOut = reader + opm.Resolve() + })) + if err != nil { + errOut = err + } + + return +} + +type 
queryProviderWrapper struct { + provider *gocbcore.AgentGroup +} + +func (apw *queryProviderWrapper) N1QLQuery(opts gocbcore.N1QLQueryOptions) (qOut queryRowReader, errOut error) { + opm := newAsyncOpManager() + err := opm.Wait(apw.provider.N1QLQuery(opts, func(reader *gocbcore.N1QLRowReader, err error) { + if err != nil { + errOut = err + opm.Reject() + return + } + + qOut = reader + opm.Resolve() + })) + if err != nil { + errOut = err + } + + return +} + +func (apw *queryProviderWrapper) PreparedN1QLQuery(opts gocbcore.N1QLQueryOptions) (qOut queryRowReader, errOut error) { + opm := newAsyncOpManager() + err := opm.Wait(apw.provider.PreparedN1QLQuery(opts, func(reader *gocbcore.N1QLRowReader, err error) { + if err != nil { + errOut = err + opm.Reject() + return + } + + qOut = reader + opm.Resolve() + })) + if err != nil { + errOut = err + } + + return +} + +type searchProviderWrapper struct { + provider *gocbcore.AgentGroup +} + +func (apw *searchProviderWrapper) SearchQuery(opts gocbcore.SearchQueryOptions) (sOut searchRowReader, errOut error) { + opm := newAsyncOpManager() + err := opm.Wait(apw.provider.SearchQuery(opts, func(reader *gocbcore.SearchRowReader, err error) { + if err != nil { + errOut = err + opm.Reject() + return + } + + sOut = reader + opm.Resolve() + })) + if err != nil { + errOut = err + } + + return +} + +type viewProviderWrapper struct { + provider *gocbcore.AgentGroup +} + +func (apw *viewProviderWrapper) ViewQuery(opts gocbcore.ViewQueryOptions) (vOut viewRowReader, errOut error) { + opm := newAsyncOpManager() + err := opm.Wait(apw.provider.ViewQuery(opts, func(reader *gocbcore.ViewQueryRowReader, err error) { + if err != nil { + errOut = err + opm.Reject() + return + } + + vOut = reader + opm.Resolve() + })) + if err != nil { + errOut = err + } + + return +} diff --git a/vendor/github.com/couchbase/gocb/v2/query_options.go b/vendor/github.com/couchbase/gocb/v2/query_options.go new file mode 100644 index 000000000000..822cc3109a79 --- 
/dev/null +++ b/vendor/github.com/couchbase/gocb/v2/query_options.go @@ -0,0 +1,144 @@ +package gocb + +import ( + "strconv" + "strings" + "time" + + "github.com/google/uuid" +) + +// QueryScanConsistency indicates the level of data consistency desired for a query. +type QueryScanConsistency uint + +const ( + // QueryScanConsistencyNotBounded indicates no data consistency is required. + QueryScanConsistencyNotBounded QueryScanConsistency = iota + 1 + // QueryScanConsistencyRequestPlus indicates that request-level data consistency is required. + QueryScanConsistencyRequestPlus +) + +// QueryOptions represents the options available when executing a query. +type QueryOptions struct { + ScanConsistency QueryScanConsistency + ConsistentWith *MutationState + Profile QueryProfileMode + + // ScanCap is the maximum buffered channel size between the indexer connectionManager and the query service for index scans. + ScanCap uint32 + + // PipelineBatch controls the number of items execution operators can batch for Fetch from the KV. + PipelineBatch uint32 + + // PipelineCap controls the maximum number of items each execution operator can buffer between various operators. + PipelineCap uint32 + + // ScanWait is how long the indexer is allowed to wait until it can satisfy ScanConsistency/ConsistentWith criteria. + ScanWait time.Duration + Readonly bool + + // MaxParallelism is the maximum number of index partitions, for computing aggregation in parallel. + MaxParallelism uint32 + + // ClientContextID provides a unique ID for this query which can be used matching up requests between connectionManager and + // server. If not provided will be assigned a uuid value. + ClientContextID string + PositionalParameters []interface{} + NamedParameters map[string]interface{} + Metrics bool + + // Raw provides a way to provide extra parameters in the request body for the query. 
+ Raw map[string]interface{} + + Adhoc bool + Timeout time.Duration + RetryStrategy RetryStrategy + + parentSpan requestSpanContext +} + +func (opts *QueryOptions) toMap() (map[string]interface{}, error) { + execOpts := make(map[string]interface{}) + + if opts.ScanConsistency != 0 && opts.ConsistentWith != nil { + return nil, makeInvalidArgumentsError("ScanConsistency and ConsistentWith must be used exclusively") + } + + if opts.ScanConsistency != 0 { + if opts.ScanConsistency == QueryScanConsistencyNotBounded { + execOpts["scan_consistency"] = "not_bounded" + } else if opts.ScanConsistency == QueryScanConsistencyRequestPlus { + execOpts["scan_consistency"] = "request_plus" + } else { + return nil, makeInvalidArgumentsError("Unexpected consistency option") + } + } + + if opts.ConsistentWith != nil { + execOpts["scan_consistency"] = "at_plus" + execOpts["scan_vectors"] = opts.ConsistentWith + } + + if opts.Profile != "" { + execOpts["profile"] = opts.Profile + } + + if opts.Readonly { + execOpts["readonly"] = opts.Readonly + } + + if opts.PositionalParameters != nil && opts.NamedParameters != nil { + return nil, makeInvalidArgumentsError("Positional and named parameters must be used exclusively") + } + + if opts.PositionalParameters != nil { + execOpts["args"] = opts.PositionalParameters + } + + if opts.NamedParameters != nil { + for key, value := range opts.NamedParameters { + if !strings.HasPrefix(key, "$") { + key = "$" + key + } + execOpts[key] = value + } + } + + if opts.ScanCap != 0 { + execOpts["scan_cap"] = strconv.FormatUint(uint64(opts.ScanCap), 10) + } + + if opts.PipelineBatch != 0 { + execOpts["pipeline_batch"] = strconv.FormatUint(uint64(opts.PipelineBatch), 10) + } + + if opts.PipelineCap != 0 { + execOpts["pipeline_cap"] = strconv.FormatUint(uint64(opts.PipelineCap), 10) + } + + if opts.ScanWait > 0 { + execOpts["scan_wait"] = opts.ScanWait.String() + } + + if opts.Raw != nil { + for k, v := range opts.Raw { + execOpts[k] = v + } + } + + if 
opts.MaxParallelism > 0 { + execOpts["max_parallelism"] = strconv.FormatUint(uint64(opts.MaxParallelism), 10) + } + + if !opts.Metrics { + execOpts["metrics"] = false + } + + if opts.ClientContextID == "" { + execOpts["client_context_id"] = uuid.New() + } else { + execOpts["client_context_id"] = opts.ClientContextID + } + + return execOpts, nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/results.go b/vendor/github.com/couchbase/gocb/v2/results.go new file mode 100644 index 000000000000..4db961d74888 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/results.go @@ -0,0 +1,350 @@ +package gocb + +import ( + "encoding/json" + "time" + + "github.com/pkg/errors" +) + +// Result is the base type for the return types of operations +type Result struct { + cas Cas +} + +// Cas returns the cas of the result. +func (d *Result) Cas() Cas { + return d.cas +} + +// GetResult is the return type of Get operations. +type GetResult struct { + Result + transcoder Transcoder + flags uint32 + contents []byte + expiry *time.Duration +} + +// Content assigns the value of the result into the valuePtr using default decoding. +func (d *GetResult) Content(valuePtr interface{}) error { + return d.transcoder.Decode(d.contents, d.flags, valuePtr) +} + +// Expiry returns the expiry value for the result if it available. Note that a nil +// pointer indicates that the Expiry was fetched, while a valid pointer to a zero +// Duration indicates that the document will never expire. +func (d *GetResult) Expiry() *time.Duration { + return d.expiry +} + +func (d *GetResult) fromFullProjection(ops []LookupInSpec, result *LookupInResult, fields []string) error { + if len(fields) == 0 { + // This is a special case where user specified a full doc fetch with expiration. 
+ d.contents = result.contents[0].data + return nil + } + + if len(result.contents) != 1 { + return makeInvalidArgumentsError("fromFullProjection should only be called with 1 subdoc result") + } + + resultContent := result.contents[0] + if resultContent.err != nil { + return resultContent.err + } + + var content map[string]interface{} + err := json.Unmarshal(resultContent.data, &content) + if err != nil { + return err + } + + newContent := make(map[string]interface{}) + for _, field := range fields { + parts := d.pathParts(field) + d.set(parts, newContent, content[field]) + } + + bytes, err := json.Marshal(newContent) + if err != nil { + return errors.Wrap(err, "could not marshal result contents") + } + d.contents = bytes + + return nil +} + +func (d *GetResult) fromSubDoc(ops []LookupInSpec, result *LookupInResult) error { + content := make(map[string]interface{}) + + for i, op := range ops { + err := result.contents[i].err + if err != nil { + // We return the first error that has occurred, this will be + // a SubDocument error and will indicate the real reason. + return err + } + + parts := d.pathParts(op.path) + d.set(parts, content, result.contents[i].data) + } + + bytes, err := json.Marshal(content) + if err != nil { + return errors.Wrap(err, "could not marshal result contents") + } + d.contents = bytes + + return nil +} + +type subdocPath struct { + path string + isArray bool +} + +func (d *GetResult) pathParts(pathStr string) []subdocPath { + pathLen := len(pathStr) + var elemIdx int + var i int + var paths []subdocPath + + for i < pathLen { + ch := pathStr[i] + i++ + + if ch == '[' { + // opening of an array + isArr := false + arrayStart := i + + for i < pathLen { + arrCh := pathStr[i] + if arrCh == ']' { + isArr = true + i++ + break + } else if arrCh == '.' 
{ + i++ + break + } + i++ + } + + if isArr { + paths = append(paths, subdocPath{path: pathStr[elemIdx : arrayStart-1], isArray: true}) + } else { + paths = append(paths, subdocPath{path: pathStr[elemIdx:i], isArray: false}) + } + elemIdx = i + + if i < pathLen && pathStr[i] == '.' { + i++ + elemIdx = i + } + } else if ch == '.' { + paths = append(paths, subdocPath{path: pathStr[elemIdx : i-1]}) + elemIdx = i + } + } + + if elemIdx != i { + // this should only ever be an object as an array would have ended in [...] + paths = append(paths, subdocPath{path: pathStr[elemIdx:i]}) + } + + return paths +} + +func (d *GetResult) set(paths []subdocPath, content interface{}, value interface{}) interface{} { + path := paths[0] + if len(paths) == 1 { + if path.isArray { + arr := make([]interface{}, 0) + arr = append(arr, value) + if _, ok := content.(map[string]interface{}); ok { + content.(map[string]interface{})[path.path] = arr + } else if _, ok := content.([]interface{}); ok { + content = append(content.([]interface{}), arr) + } else { + logErrorf("Projections encountered a non-array or object content assigning an array") + } + } else { + if _, ok := content.([]interface{}); ok { + elem := make(map[string]interface{}) + elem[path.path] = value + content = append(content.([]interface{}), elem) + } else { + content.(map[string]interface{})[path.path] = value + } + } + return content + } + + if path.isArray { + if _, ok := content.([]interface{}); ok { + var m []interface{} + content = append(content.([]interface{}), d.set(paths[1:], m, value)) + return content + } else if cMap, ok := content.(map[string]interface{}); ok { + cMap[path.path] = make([]interface{}, 0) + cMap[path.path] = d.set(paths[1:], cMap[path.path], value) + return content + + } else { + logErrorf("Projections encountered a non-array or object content assigning an array") + } + } else { + if arr, ok := content.([]interface{}); ok { + m := make(map[string]interface{}) + m[path.path] = 
make(map[string]interface{}) + content = append(arr, m) + d.set(paths[1:], m[path.path], value) + return content + } + cMap, ok := content.(map[string]interface{}) + if !ok { + // this isn't possible but the linter won't play nice without it + logErrorf("Failed to assert projection content to a map") + } + cMap[path.path] = make(map[string]interface{}) + return d.set(paths[1:], cMap[path.path], value) + } + + return content +} + +// LookupInResult is the return type for LookupIn. +type LookupInResult struct { + Result + contents []lookupInPartial +} + +type lookupInPartial struct { + data json.RawMessage + err error +} + +func (pr *lookupInPartial) as(valuePtr interface{}) error { + if pr.err != nil { + return pr.err + } + + if valuePtr == nil { + return nil + } + + if valuePtr, ok := valuePtr.(*[]byte); ok { + *valuePtr = pr.data + return nil + } + + return json.Unmarshal(pr.data, valuePtr) +} + +func (pr *lookupInPartial) exists() bool { + err := pr.as(nil) + return err == nil +} + +// ContentAt retrieves the value of the operation by its index. The index is the position of +// the operation as it was added to the builder. +func (lir *LookupInResult) ContentAt(idx uint, valuePtr interface{}) error { + if idx >= uint(len(lir.contents)) { + return makeInvalidArgumentsError("invalid index") + } + return lir.contents[idx].as(valuePtr) +} + +// Exists verifies that the item at idx exists. +func (lir *LookupInResult) Exists(idx uint) bool { + if idx >= uint(len(lir.contents)) { + return false + } + return lir.contents[idx].exists() +} + +// ExistsResult is the return type of Exist operations. +type ExistsResult struct { + Result + docExists bool +} + +// Exists returns whether or not the document exists. +func (d *ExistsResult) Exists() bool { + return d.docExists +} + +// MutationResult is the return type of any store related operations. It contains Cas and mutation tokens. 
+type MutationResult struct { + Result + mt *MutationToken +} + +// MutationToken returns the mutation token belonging to an operation. +func (mr MutationResult) MutationToken() *MutationToken { + return mr.mt +} + +// MutateInResult is the return type of any mutate in related operations. +// It contains Cas, mutation tokens and any returned content. +type MutateInResult struct { + MutationResult + contents []mutateInPartial +} + +type mutateInPartial struct { + data json.RawMessage +} + +func (pr *mutateInPartial) as(valuePtr interface{}) error { + if valuePtr == nil { + return nil + } + + if valuePtr, ok := valuePtr.(*[]byte); ok { + *valuePtr = pr.data + return nil + } + + return json.Unmarshal(pr.data, valuePtr) +} + +// ContentAt retrieves the value of the operation by its index. The index is the position of +// the operation as it was added to the builder. +func (mir MutateInResult) ContentAt(idx uint, valuePtr interface{}) error { + return mir.contents[idx].as(valuePtr) +} + +// CounterResult is the return type of counter operations. +type CounterResult struct { + MutationResult + content uint64 +} + +// MutationToken returns the mutation token belonging to an operation. +func (mr CounterResult) MutationToken() *MutationToken { + return mr.mt +} + +// Cas returns the Cas value for a document following an operation. +func (mr CounterResult) Cas() Cas { + return mr.cas +} + +// Content returns the new value for the counter document. +func (mr CounterResult) Content() uint64 { + return mr.content +} + +// GetReplicaResult is the return type of GetReplica operations. +type GetReplicaResult struct { + GetResult + isReplica bool +} + +// IsReplica returns whether or not this result came from a replica server. 
+func (r *GetReplicaResult) IsReplica() bool { + return r.isReplica +} diff --git a/vendor/github.com/couchbase/gocb/v2/retry.go b/vendor/github.com/couchbase/gocb/v2/retry.go new file mode 100644 index 000000000000..87905de9573c --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/retry.go @@ -0,0 +1,194 @@ +package gocb + +import ( + "time" + + "github.com/couchbase/gocbcore/v9" +) + +func translateCoreRetryReasons(reasons []gocbcore.RetryReason) []RetryReason { + var reasonsOut []RetryReason + + for _, retryReason := range reasons { + gocbReason, ok := retryReason.(RetryReason) + if !ok { + logErrorf("Failed to assert gocbcore retry reason to gocb retry reason: %v", retryReason) + continue + } + reasonsOut = append(reasonsOut, gocbReason) + } + + return reasonsOut +} + +// RetryRequest is a request that can possibly be retried. +type RetryRequest interface { + RetryAttempts() uint32 + Identifier() string + Idempotent() bool + RetryReasons() []RetryReason +} + +type wrappedRetryRequest struct { + req gocbcore.RetryRequest +} + +func (req *wrappedRetryRequest) RetryAttempts() uint32 { + return req.req.RetryAttempts() +} + +func (req *wrappedRetryRequest) Identifier() string { + return req.req.Identifier() +} + +func (req *wrappedRetryRequest) Idempotent() bool { + return req.req.Idempotent() +} + +func (req *wrappedRetryRequest) RetryReasons() []RetryReason { + return translateCoreRetryReasons(req.req.RetryReasons()) +} + +// RetryReason represents the reason for an operation possibly being retried. +type RetryReason interface { + AllowsNonIdempotentRetry() bool + AlwaysRetry() bool + Description() string +} + +var ( + // UnknownRetryReason indicates that the operation failed for an unknown reason. + UnknownRetryReason = RetryReason(gocbcore.UnknownRetryReason) + + // SocketNotAvailableRetryReason indicates that the operation failed because the underlying socket was not available. 
+ SocketNotAvailableRetryReason = RetryReason(gocbcore.SocketNotAvailableRetryReason) + + // ServiceNotAvailableRetryReason indicates that the operation failed because the requested service was not available. + ServiceNotAvailableRetryReason = RetryReason(gocbcore.ServiceNotAvailableRetryReason) + + // NodeNotAvailableRetryReason indicates that the operation failed because the requested node was not available. + NodeNotAvailableRetryReason = RetryReason(gocbcore.NodeNotAvailableRetryReason) + + // KVNotMyVBucketRetryReason indicates that the operation failed because it was sent to the wrong node for the vbucket. + KVNotMyVBucketRetryReason = RetryReason(gocbcore.KVNotMyVBucketRetryReason) + + // KVCollectionOutdatedRetryReason indicates that the operation failed because the collection ID on the request is outdated. + KVCollectionOutdatedRetryReason = RetryReason(gocbcore.KVCollectionOutdatedRetryReason) + + // KVErrMapRetryReason indicates that the operation failed for an unsupported reason but the KV error map indicated + // that the operation can be retried. + KVErrMapRetryReason = RetryReason(gocbcore.KVErrMapRetryReason) + + // KVLockedRetryReason indicates that the operation failed because the document was locked. + KVLockedRetryReason = RetryReason(gocbcore.KVLockedRetryReason) + + // KVTemporaryFailureRetryReason indicates that the operation failed because of a temporary failure. + KVTemporaryFailureRetryReason = RetryReason(gocbcore.KVTemporaryFailureRetryReason) + + // KVSyncWriteInProgressRetryReason indicates that the operation failed because a sync write is in progress. + KVSyncWriteInProgressRetryReason = RetryReason(gocbcore.KVSyncWriteInProgressRetryReason) + + // KVSyncWriteRecommitInProgressRetryReason indicates that the operation failed because a sync write recommit is in progress. 
+ KVSyncWriteRecommitInProgressRetryReason = RetryReason(gocbcore.KVSyncWriteRecommitInProgressRetryReason) + + // ServiceResponseCodeIndicatedRetryReason indicates that the operation failed and the service responded stating that + // the request should be retried. + ServiceResponseCodeIndicatedRetryReason = RetryReason(gocbcore.ServiceResponseCodeIndicatedRetryReason) + + // SocketCloseInFlightRetryReason indicates that the operation failed because the socket was closed whilst the operation + // was in flight. + SocketCloseInFlightRetryReason = RetryReason(gocbcore.SocketCloseInFlightRetryReason) + + // CircuitBreakerOpenRetryReason indicates that the operation failed because the circuit breaker on the connection + // was open. + CircuitBreakerOpenRetryReason = RetryReason(gocbcore.CircuitBreakerOpenRetryReason) + + // QueryIndexNotFoundRetryReason indicates that the operation failed due to a missing query index + QueryIndexNotFoundRetryReason = RetryReason(gocbcore.QueryIndexNotFoundRetryReason) + + // QueryPreparedStatementFailureRetryReason indicates that the operation failed due to a prepared statement failure + QueryPreparedStatementFailureRetryReason = RetryReason(gocbcore.QueryPreparedStatementFailureRetryReason) + + // AnalyticsTemporaryFailureRetryReason indicates that an analytics operation failed due to a temporary failure + AnalyticsTemporaryFailureRetryReason = RetryReason(gocbcore.AnalyticsTemporaryFailureRetryReason) + + // SearchTooManyRequestsRetryReason indicates that a search operation failed due to too many requests + SearchTooManyRequestsRetryReason = RetryReason(gocbcore.SearchTooManyRequestsRetryReason) +) + +// RetryAction is used by a RetryStrategy to calculate the duration to wait before retrying an operation. +// Returning a value of 0 indicates to not retry. +type RetryAction interface { + Duration() time.Duration +} + +// NoRetryRetryAction represents an action that indicates to not retry. 
+type NoRetryRetryAction struct { +} + +// Duration is the length of time to wait before retrying an operation. +func (ra *NoRetryRetryAction) Duration() time.Duration { + return 0 +} + +// WithDurationRetryAction represents an action that indicates to retry with a given duration. +type WithDurationRetryAction struct { + WithDuration time.Duration +} + +// Duration is the length of time to wait before retrying an operation. +func (ra *WithDurationRetryAction) Duration() time.Duration { + return ra.WithDuration +} + +// RetryStrategy is to determine if an operation should be retried, and if so how long to wait before retrying. +type RetryStrategy interface { + RetryAfter(req RetryRequest, reason RetryReason) RetryAction +} + +func newRetryStrategyWrapper(strategy RetryStrategy) *retryStrategyWrapper { + return &retryStrategyWrapper{ + wrapped: strategy, + } +} + +type retryStrategyWrapper struct { + wrapped RetryStrategy +} + +// RetryAfter calculates and returns a RetryAction describing how long to wait before retrying an operation. +func (rs *retryStrategyWrapper) RetryAfter(req gocbcore.RetryRequest, reason gocbcore.RetryReason) gocbcore.RetryAction { + wreq := &wrappedRetryRequest{ + req: req, + } + wrappedAction := rs.wrapped.RetryAfter(wreq, RetryReason(reason)) + return gocbcore.RetryAction(wrappedAction) +} + +// BackoffCalculator defines how backoff durations will be calculated by the retry API. +type BackoffCalculator func(retryAttempts uint32) time.Duration + +// BestEffortRetryStrategy represents a strategy that will keep retrying until it succeeds (or the caller times out +// the request). +type BestEffortRetryStrategy struct { + BackoffCalculator BackoffCalculator +} + +// NewBestEffortRetryStrategy returns a new BestEffortRetryStrategy which will use the supplied calculator function +// to calculate retry durations. If calculator is nil then a controlled backoff will be used. 
+func NewBestEffortRetryStrategy(calculator BackoffCalculator) *BestEffortRetryStrategy { + if calculator == nil { + calculator = BackoffCalculator(gocbcore.ExponentialBackoff(1*time.Millisecond, 500*time.Millisecond, 2)) + } + + return &BestEffortRetryStrategy{BackoffCalculator: calculator} +} + +// RetryAfter calculates and returns a RetryAction describing how long to wait before retrying an operation. +func (rs *BestEffortRetryStrategy) RetryAfter(req RetryRequest, reason RetryReason) RetryAction { + if req.Idempotent() || reason.AllowsNonIdempotentRetry() { + return &WithDurationRetryAction{WithDuration: rs.BackoffCalculator(req.RetryAttempts())} + } + + return &NoRetryRetryAction{} +} diff --git a/vendor/github.com/couchbase/gocb/v2/scope.go b/vendor/github.com/couchbase/gocb/v2/scope.go new file mode 100644 index 000000000000..981fac91b5af --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/scope.go @@ -0,0 +1,55 @@ +package gocb + +// Scope represents a single scope within a bucket. +// VOLATILE: This API is subject to change at any time. +type Scope struct { + scopeName string + bucket *Bucket + + timeoutsConfig kvTimeoutsConfig + + transcoder Transcoder + retryStrategyWrapper *retryStrategyWrapper + tracer requestTracer + + useMutationTokens bool + + getKvProvider func() (kvProvider, error) +} + +func newScope(bucket *Bucket, scopeName string) *Scope { + return &Scope{ + scopeName: scopeName, + bucket: bucket, + + timeoutsConfig: kvTimeoutsConfig{ + KVTimeout: bucket.timeoutsConfig.KVTimeout, + KVDurableTimeout: bucket.timeoutsConfig.KVDurableTimeout, + }, + + transcoder: bucket.transcoder, + retryStrategyWrapper: bucket.retryStrategyWrapper, + tracer: bucket.tracer, + + useMutationTokens: bucket.useMutationTokens, + + getKvProvider: bucket.getKvProvider, + } +} + +// Name returns the name of the scope. +func (s *Scope) Name() string { + return s.scopeName +} + +// BucketName returns the name of the bucket to which this collection belongs. 
+// UNCOMMITTED: This API may change in the future. +func (s *Scope) BucketName() string { + return s.bucket.Name() +} + +// Collection returns an instance of a collection. +// VOLATILE: This API is subject to change at any time. +func (s *Scope) Collection(collectionName string) *Collection { + return newCollection(s, collectionName) +} diff --git a/vendor/github.com/couchbase/gocb/v2/search/facets.go b/vendor/github.com/couchbase/gocb/v2/search/facets.go new file mode 100644 index 000000000000..accc40dbd3ed --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/search/facets.go @@ -0,0 +1,110 @@ +package search + +import ( + "encoding/json" +) + +// Facet represents a facet for a search query. +type Facet interface { +} + +type termFacetData struct { + Field string `json:"field,omitempty"` + Size uint64 `json:"size,omitempty"` +} + +// TermFacet is an search term facet. +type TermFacet struct { + data termFacetData +} + +// MarshalJSON marshal's this facet to JSON for the search REST API. +func (f TermFacet) MarshalJSON() ([]byte, error) { + return json.Marshal(f.data) +} + +// NewTermFacet creates a new TermFacet +func NewTermFacet(field string, size uint64) *TermFacet { + mq := &TermFacet{} + mq.data.Field = field + mq.data.Size = size + return mq +} + +type numericFacetRange struct { + Name string `json:"name,omitempty"` + Start float64 `json:"start,omitempty"` + End float64 `json:"end,omitempty"` +} +type numericFacetData struct { + Field string `json:"field,omitempty"` + Size uint64 `json:"size,omitempty"` + NumericRanges []numericFacetRange `json:"numeric_ranges,omitempty"` +} + +// NumericFacet is an search numeric range facet. +type NumericFacet struct { + data numericFacetData +} + +// MarshalJSON marshal's this facet to JSON for the search REST API. +func (f NumericFacet) MarshalJSON() ([]byte, error) { + return json.Marshal(f.data) +} + +// AddRange adds a new range to this numeric range facet. 
+func (f *NumericFacet) AddRange(name string, start, end float64) *NumericFacet { + f.data.NumericRanges = append(f.data.NumericRanges, numericFacetRange{ + Name: name, + Start: start, + End: end, + }) + return f +} + +// NewNumericFacet creates a new numeric range facet. +func NewNumericFacet(field string, size uint64) *NumericFacet { + mq := &NumericFacet{} + mq.data.Field = field + mq.data.Size = size + return mq +} + +type dateFacetRange struct { + Name string `json:"name,omitempty"` + Start string `json:"start,omitempty"` + End string `json:"end,omitempty"` +} +type dateFacetData struct { + Field string `json:"field,omitempty"` + Size uint64 `json:"size,omitempty"` + DateRanges []dateFacetRange `json:"date_ranges,omitempty"` +} + +// DateFacet is an search date range facet. +type DateFacet struct { + data dateFacetData +} + +// MarshalJSON marshal's this facet to JSON for the search REST API. +func (f DateFacet) MarshalJSON() ([]byte, error) { + return json.Marshal(f.data) +} + +// AddRange adds a new range to this date range facet. +func (f *DateFacet) AddRange(name string, start, end string) *DateFacet { + f.data.DateRanges = append(f.data.DateRanges, dateFacetRange{ + Name: name, + Start: start, + End: end, + }) + return f +} + +// NewDateFacet creates a new date range facet. +func NewDateFacet(field string, size uint64) *DateFacet { + mq := &DateFacet{} + mq.data.Field = field + mq.data.Size = size + return mq +} diff --git a/vendor/github.com/couchbase/gocb/v2/search/queries.go b/vendor/github.com/couchbase/gocb/v2/search/queries.go new file mode 100644 index 000000000000..7d34836d6256 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/search/queries.go @@ -0,0 +1,620 @@ +package search + +import "encoding/json" + +// Query represents a search query. 
+type Query interface { +} + +type searchQueryBase struct { + options map[string]interface{} +} + +func newSearchQueryBase() searchQueryBase { + return searchQueryBase{ + options: make(map[string]interface{}), + } +} + +// MarshalJSON marshal's this query to JSON for the search REST API. +func (q searchQueryBase) MarshalJSON() ([]byte, error) { + return json.Marshal(q.options) +} + +// MatchQuery represents a search match query. +type MatchQuery struct { + searchQueryBase +} + +// NewMatchQuery creates a new MatchQuery. +func NewMatchQuery(match string) *MatchQuery { + q := &MatchQuery{newSearchQueryBase()} + q.options["match"] = match + return q +} + +// Field specifies the field for this query. +func (q *MatchQuery) Field(field string) *MatchQuery { + q.options["field"] = field + return q +} + +// Analyzer specifies the analyzer to use for this query. +func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery { + q.options["analyzer"] = analyzer + return q +} + +// PrefixLength specifies the prefix length from this query. +func (q *MatchQuery) PrefixLength(length uint64) *MatchQuery { + q.options["prefix_length"] = length + return q +} + +// Fuzziness specifies the fuziness for this query. +func (q *MatchQuery) Fuzziness(fuzziness uint64) *MatchQuery { + q.options["fuzziness"] = fuzziness + return q +} + +// Boost specifies the boost for this query. +func (q *MatchQuery) Boost(boost float32) *MatchQuery { + q.options["boost"] = boost + return q +} + +// MatchPhraseQuery represents a search match phrase query. +type MatchPhraseQuery struct { + searchQueryBase +} + +// NewMatchPhraseQuery creates a new MatchPhraseQuery +func NewMatchPhraseQuery(phrase string) *MatchPhraseQuery { + q := &MatchPhraseQuery{newSearchQueryBase()} + q.options["match_phrase"] = phrase + return q +} + +// Field specifies the field for this query. 
+func (q *MatchPhraseQuery) Field(field string) *MatchPhraseQuery { + q.options["field"] = field + return q +} + +// Analyzer specifies the analyzer to use for this query. +func (q *MatchPhraseQuery) Analyzer(analyzer string) *MatchPhraseQuery { + q.options["analyzer"] = analyzer + return q +} + +// Boost specifies the boost for this query. +func (q *MatchPhraseQuery) Boost(boost float32) *MatchPhraseQuery { + q.options["boost"] = boost + return q +} + +// RegexpQuery represents a search regular expression query. +type RegexpQuery struct { + searchQueryBase +} + +// NewRegexpQuery creates a new RegexpQuery. +func NewRegexpQuery(regexp string) *RegexpQuery { + q := &RegexpQuery{newSearchQueryBase()} + q.options["regexp"] = regexp + return q +} + +// Field specifies the field for this query. +func (q *RegexpQuery) Field(field string) *RegexpQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. +func (q *RegexpQuery) Boost(boost float32) *RegexpQuery { + q.options["boost"] = boost + return q +} + +// QueryStringQuery represents a search string query. +type QueryStringQuery struct { + searchQueryBase +} + +// NewQueryStringQuery creates a new StringQuery. +func NewQueryStringQuery(query string) *QueryStringQuery { + q := &QueryStringQuery{newSearchQueryBase()} + q.options["query"] = query + return q +} + +// Boost specifies the boost for this query. +func (q *QueryStringQuery) Boost(boost float32) *QueryStringQuery { + q.options["boost"] = boost + return q +} + +// NumericRangeQuery represents a search numeric range query. +type NumericRangeQuery struct { + searchQueryBase +} + +// NewNumericRangeQuery creates a new NumericRangeQuery. +func NewNumericRangeQuery() *NumericRangeQuery { + q := &NumericRangeQuery{newSearchQueryBase()} + return q +} + +// Min specifies the minimum value and inclusiveness for this range query. 
+func (q *NumericRangeQuery) Min(min float32, inclusive bool) *NumericRangeQuery { + q.options["min"] = min + q.options["inclusive_min"] = inclusive + return q +} + +// Max specifies the maximum value and inclusiveness for this range query. +func (q *NumericRangeQuery) Max(max float32, inclusive bool) *NumericRangeQuery { + q.options["max"] = max + q.options["inclusive_max"] = inclusive + return q +} + +// Field specifies the field for this query. +func (q *NumericRangeQuery) Field(field string) *NumericRangeQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. +func (q *NumericRangeQuery) Boost(boost float32) *NumericRangeQuery { + q.options["boost"] = boost + return q +} + +// DateRangeQuery represents a search date range query. +type DateRangeQuery struct { + searchQueryBase +} + +// NewDateRangeQuery creates a new DateRangeQuery. +func NewDateRangeQuery() *DateRangeQuery { + q := &DateRangeQuery{newSearchQueryBase()} + return q +} + +// Start specifies the start value and inclusiveness for this range query. +func (q *DateRangeQuery) Start(start string, inclusive bool) *DateRangeQuery { + q.options["start"] = start + q.options["inclusive_start"] = inclusive + return q +} + +// End specifies the end value and inclusiveness for this range query. +func (q *DateRangeQuery) End(end string, inclusive bool) *DateRangeQuery { + q.options["end"] = end + q.options["inclusive_end"] = inclusive + return q +} + +// DateTimeParser specifies which date time string parser to use. +func (q *DateRangeQuery) DateTimeParser(parser string) *DateRangeQuery { + q.options["datetime_parser"] = parser + return q +} + +// Field specifies the field for this query. +func (q *DateRangeQuery) Field(field string) *DateRangeQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. 
+func (q *DateRangeQuery) Boost(boost float32) *DateRangeQuery { + q.options["boost"] = boost + return q +} + +// ConjunctionQuery represents a search conjunction query. +type ConjunctionQuery struct { + searchQueryBase +} + +// NewConjunctionQuery creates a new ConjunctionQuery. +func NewConjunctionQuery(queries ...Query) *ConjunctionQuery { + q := &ConjunctionQuery{newSearchQueryBase()} + q.options["conjuncts"] = []Query{} + return q.And(queries...) +} + +// And adds new predicate queries to this conjunction query. +func (q *ConjunctionQuery) And(queries ...Query) *ConjunctionQuery { + q.options["conjuncts"] = append(q.options["conjuncts"].([]Query), queries...) + return q +} + +// Boost specifies the boost for this query. +func (q *ConjunctionQuery) Boost(boost float32) *ConjunctionQuery { + q.options["boost"] = boost + return q +} + +// DisjunctionQuery represents a search disjunction query. +type DisjunctionQuery struct { + searchQueryBase +} + +// NewDisjunctionQuery creates a new DisjunctionQuery. +func NewDisjunctionQuery(queries ...Query) *DisjunctionQuery { + q := &DisjunctionQuery{newSearchQueryBase()} + q.options["disjuncts"] = []Query{} + return q.Or(queries...) +} + +// Or adds new predicate queries to this disjunction query. +func (q *DisjunctionQuery) Or(queries ...Query) *DisjunctionQuery { + q.options["disjuncts"] = append(q.options["disjuncts"].([]Query), queries...) + return q +} + +// Boost specifies the boost for this query. +func (q *DisjunctionQuery) Boost(boost float32) *DisjunctionQuery { + q.options["boost"] = boost + return q +} + +type booleanQueryData struct { + Must *ConjunctionQuery `json:"must,omitempty"` + Should *DisjunctionQuery `json:"should,omitempty"` + MustNot *DisjunctionQuery `json:"must_not,omitempty"` + Boost float32 `json:"boost,omitempty"` +} + +// BooleanQuery represents a search boolean query. +type BooleanQuery struct { + data booleanQueryData + shouldMin uint32 +} + +// NewBooleanQuery creates a new BooleanQuery. 
+func NewBooleanQuery() *BooleanQuery { + q := &BooleanQuery{} + return q +} + +// Must specifies a query which must match. +func (q *BooleanQuery) Must(query Query) *BooleanQuery { + switch val := query.(type) { + case ConjunctionQuery: + q.data.Must = &val + case *ConjunctionQuery: + q.data.Must = val + default: + q.data.Must = NewConjunctionQuery(val) + } + return q +} + +// Should specifies a query which should match. +func (q *BooleanQuery) Should(query Query) *BooleanQuery { + switch val := query.(type) { + case DisjunctionQuery: + q.data.Should = &val + case *DisjunctionQuery: + q.data.Should = val + default: + q.data.Should = NewDisjunctionQuery(val) + } + return q +} + +// MustNot specifies a query which must not match. +func (q *BooleanQuery) MustNot(query Query) *BooleanQuery { + switch val := query.(type) { + case DisjunctionQuery: + q.data.MustNot = &val + case *DisjunctionQuery: + q.data.MustNot = val + default: + q.data.MustNot = NewDisjunctionQuery(val) + } + return q +} + +// ShouldMin specifies the minimum value before the should query will boost. +func (q *BooleanQuery) ShouldMin(min uint32) *BooleanQuery { + q.shouldMin = min + return q +} + +// Boost specifies the boost for this query. +func (q *BooleanQuery) Boost(boost float32) *BooleanQuery { + q.data.Boost = boost + return q +} + +// MarshalJSON marshal's this query to JSON for the search REST API. +func (q *BooleanQuery) MarshalJSON() ([]byte, error) { + if q.data.Should != nil { + q.data.Should.options["min"] = q.shouldMin + } + bytes, err := json.Marshal(q.data) + if q.data.Should != nil { + delete(q.data.Should.options, "min") + } + return bytes, err +} + +// WildcardQuery represents a search wildcard query. +type WildcardQuery struct { + searchQueryBase +} + +// NewWildcardQuery creates a new WildcardQuery. 
+func NewWildcardQuery(wildcard string) *WildcardQuery { + q := &WildcardQuery{newSearchQueryBase()} + q.options["wildcard"] = wildcard + return q +} + +// Field specifies the field for this query. +func (q *WildcardQuery) Field(field string) *WildcardQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. +func (q *WildcardQuery) Boost(boost float32) *WildcardQuery { + q.options["boost"] = boost + return q +} + +// DocIDQuery represents a search document id query. +type DocIDQuery struct { + searchQueryBase +} + +// NewDocIDQuery creates a new DocIdQuery. +func NewDocIDQuery(ids ...string) *DocIDQuery { + q := &DocIDQuery{newSearchQueryBase()} + q.options["ids"] = []string{} + return q.AddDocIds(ids...) +} + +// AddDocIds adds addition document ids to this query. +func (q *DocIDQuery) AddDocIds(ids ...string) *DocIDQuery { + q.options["ids"] = append(q.options["ids"].([]string), ids...) + return q +} + +// Field specifies the field for this query. +func (q *DocIDQuery) Field(field string) *DocIDQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. +func (q *DocIDQuery) Boost(boost float32) *DocIDQuery { + q.options["boost"] = boost + return q +} + +// BooleanFieldQuery represents a search boolean field query. +type BooleanFieldQuery struct { + searchQueryBase +} + +// NewBooleanFieldQuery creates a new BooleanFieldQuery. +func NewBooleanFieldQuery(val bool) *BooleanFieldQuery { + q := &BooleanFieldQuery{newSearchQueryBase()} + q.options["bool"] = val + return q +} + +// Field specifies the field for this query. +func (q *BooleanFieldQuery) Field(field string) *BooleanFieldQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. +func (q *BooleanFieldQuery) Boost(boost float32) *BooleanFieldQuery { + q.options["boost"] = boost + return q +} + +// TermQuery represents a search term query. 
+type TermQuery struct { + searchQueryBase +} + +// NewTermQuery creates a new TermQuery. +func NewTermQuery(term string) *TermQuery { + q := &TermQuery{newSearchQueryBase()} + q.options["term"] = term + return q +} + +// Field specifies the field for this query. +func (q *TermQuery) Field(field string) *TermQuery { + q.options["field"] = field + return q +} + +// PrefixLength specifies the prefix length for this query. +func (q *TermQuery) PrefixLength(length uint64) *TermQuery { + q.options["prefix_length"] = length + return q +} + +// Fuzziness specifies the fuzziness for this query. +func (q *TermQuery) Fuzziness(fuzziness uint64) *TermQuery { + q.options["fuzziness"] = fuzziness + return q +} + +// Boost specifies the boost for this query. +func (q *TermQuery) Boost(boost float32) *TermQuery { + q.options["boost"] = boost + return q +} + +// PhraseQuery represents a search phrase query. +type PhraseQuery struct { + searchQueryBase +} + +// NewPhraseQuery creates a new PhraseQuery. +func NewPhraseQuery(terms ...string) *PhraseQuery { + q := &PhraseQuery{newSearchQueryBase()} + q.options["terms"] = terms + return q +} + +// Field specifies the field for this query. +func (q *PhraseQuery) Field(field string) *PhraseQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. +func (q *PhraseQuery) Boost(boost float32) *PhraseQuery { + q.options["boost"] = boost + return q +} + +// PrefixQuery represents a search prefix query. +type PrefixQuery struct { + searchQueryBase +} + +// NewPrefixQuery creates a new PrefixQuery. +func NewPrefixQuery(prefix string) *PrefixQuery { + q := &PrefixQuery{newSearchQueryBase()} + q.options["prefix"] = prefix + return q +} + +// Field specifies the field for this query. +func (q *PrefixQuery) Field(field string) *PrefixQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. 
+func (q *PrefixQuery) Boost(boost float32) *PrefixQuery { + q.options["boost"] = boost + return q +} + +// MatchAllQuery represents a search match all query. +type MatchAllQuery struct { + searchQueryBase +} + +// NewMatchAllQuery creates a new MatchAllQuery. +func NewMatchAllQuery() *MatchAllQuery { + q := &MatchAllQuery{newSearchQueryBase()} + q.options["match_all"] = nil + return q +} + +// MatchNoneQuery represents a search match none query. +type MatchNoneQuery struct { + searchQueryBase +} + +// NewMatchNoneQuery creates a new MatchNoneQuery. +func NewMatchNoneQuery() *MatchNoneQuery { + q := &MatchNoneQuery{newSearchQueryBase()} + q.options["match_none"] = nil + return q +} + +// TermRangeQuery represents a search term range query. +type TermRangeQuery struct { + searchQueryBase +} + +// NewTermRangeQuery creates a new TermRangeQuery. +func NewTermRangeQuery(term string) *TermRangeQuery { + q := &TermRangeQuery{newSearchQueryBase()} + q.options["term"] = term + return q +} + +// Field specifies the field for this query. +func (q *TermRangeQuery) Field(field string) *TermRangeQuery { + q.options["field"] = field + return q +} + +// Min specifies the minimum value and inclusiveness for this range query. +func (q *TermRangeQuery) Min(min string, inclusive bool) *TermRangeQuery { + q.options["min"] = min + q.options["inclusive_min"] = inclusive + return q +} + +// Max specifies the maximum value and inclusiveness for this range query. +func (q *TermRangeQuery) Max(max string, inclusive bool) *TermRangeQuery { + q.options["max"] = max + q.options["inclusive_max"] = inclusive + return q +} + +// Boost specifies the boost for this query. +func (q *TermRangeQuery) Boost(boost float32) *TermRangeQuery { + q.options["boost"] = boost + return q +} + +// GeoDistanceQuery represents a search geographical distance query. +type GeoDistanceQuery struct { + searchQueryBase +} + +// NewGeoDistanceQuery creates a new GeoDistanceQuery. 
+func NewGeoDistanceQuery(lon, lat float64, distance string) *GeoDistanceQuery { + q := &GeoDistanceQuery{newSearchQueryBase()} + q.options["location"] = []float64{lon, lat} + q.options["distance"] = distance + return q +} + +// Field specifies the field for this query. +func (q *GeoDistanceQuery) Field(field string) *GeoDistanceQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. +func (q *GeoDistanceQuery) Boost(boost float32) *GeoDistanceQuery { + q.options["boost"] = boost + return q +} + +// GeoBoundingBoxQuery represents a search geographical bounding box query. +type GeoBoundingBoxQuery struct { + searchQueryBase +} + +// NewGeoBoundingBoxQuery creates a new GeoBoundingBoxQuery. +func NewGeoBoundingBoxQuery(tlLon, tlLat, brLon, brLat float64) *GeoBoundingBoxQuery { + q := &GeoBoundingBoxQuery{newSearchQueryBase()} + q.options["top_left"] = []float64{tlLon, tlLat} + q.options["bottom_right"] = []float64{brLon, brLat} + return q +} + +// Field specifies the field for this query. +func (q *GeoBoundingBoxQuery) Field(field string) *GeoBoundingBoxQuery { + q.options["field"] = field + return q +} + +// Boost specifies the boost for this query. +func (q *GeoBoundingBoxQuery) Boost(boost float32) *GeoBoundingBoxQuery { + q.options["boost"] = boost + return q +} diff --git a/vendor/github.com/couchbase/gocb/v2/search/sorting.go b/vendor/github.com/couchbase/gocb/v2/search/sorting.go new file mode 100644 index 000000000000..a9f07725c89f --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/search/sorting.go @@ -0,0 +1,123 @@ +package search + +import ( + "encoding/json" +) + +// SearchSort represents an search sorting for a search query. +type Sort interface { +} + +type searchSortBase struct { + options map[string]interface{} +} + +func newSearchSortBase() searchSortBase { + return searchSortBase{ + options: make(map[string]interface{}), + } +} + +// MarshalJSON marshal's this query to JSON for the search REST API. 
+func (q searchSortBase) MarshalJSON() ([]byte, error) { + return json.Marshal(q.options) +} + +// SearchSortScore represents a search score sort. +type SearchSortScore struct { + searchSortBase +} + +// NewSearchSortScore creates a new SearchSortScore. +func NewSearchSortScore() *SearchSortScore { + q := &SearchSortScore{newSearchSortBase()} + q.options["by"] = "score" + return q +} + +// Descending specifies the ordering of the results. +func (q *SearchSortScore) Descending(descending bool) *SearchSortScore { + q.options["desc"] = descending + return q +} + +// SearchSortID represents a search Document ID sort. +type SearchSortID struct { + searchSortBase +} + +// NewSearchSortID creates a new SearchSortScore. +func NewSearchSortID() *SearchSortID { + q := &SearchSortID{newSearchSortBase()} + q.options["by"] = "id" + return q +} + +// Descending specifies the ordering of the results. +func (q *SearchSortID) Descending(descending bool) *SearchSortID { + q.options["desc"] = descending + return q +} + +// SearchSortField represents a search field sort. +type SearchSortField struct { + searchSortBase +} + +// NewSearchSortField creates a new SearchSortField. +func NewSearchSortField(field string) *SearchSortField { + q := &SearchSortField{newSearchSortBase()} + q.options["by"] = "field" + q.options["field"] = field + return q +} + +// Type allows you to specify the search field sort type. +func (q *SearchSortField) Type(value string) *SearchSortField { + q.options["type"] = value + return q +} + +// Mode allows you to specify the search field sort mode. +func (q *SearchSortField) Mode(mode string) *SearchSortField { + q.options["mode"] = mode + return q +} + +// Missing allows you to specify the search field sort missing behaviour. +func (q *SearchSortField) Missing(missing string) *SearchSortField { + q.options["missing"] = missing + return q +} + +// Descending specifies the ordering of the results. 
+func (q *SearchSortField) Descending(descending bool) *SearchSortField { + q.options["desc"] = descending + return q +} + +// SearchSortGeoDistance represents a search geo sort. +type SearchSortGeoDistance struct { + searchSortBase +} + +// NewSearchSortGeoDistance creates a new SearchSortGeoDistance. +func NewSearchSortGeoDistance(field string, lon, lat float64) *SearchSortGeoDistance { + q := &SearchSortGeoDistance{newSearchSortBase()} + q.options["by"] = "geo_distance" + q.options["field"] = field + q.options["location"] = []float64{lon, lat} + return q +} + +// Unit specifies the unit used for sorting +func (q *SearchSortGeoDistance) Unit(unit string) *SearchSortGeoDistance { + q.options["unit"] = unit + return q +} + +// Descending specifies the ordering of the results. +func (q *SearchSortGeoDistance) Descending(descending bool) *SearchSortGeoDistance { + q.options["desc"] = descending + return q +} diff --git a/vendor/github.com/couchbase/gocb/v2/searchquery_options.go b/vendor/github.com/couchbase/gocb/v2/searchquery_options.go new file mode 100644 index 000000000000..3710aaa4d68e --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/searchquery_options.go @@ -0,0 +1,138 @@ +package gocb + +import ( + "time" + + cbsearch "github.com/couchbase/gocb/v2/search" +) + +// SearchHighlightStyle indicates the type of highlighting to use for a search query. +type SearchHighlightStyle string + +const ( + // DefaultHighlightStyle specifies to use the default to highlight search result hits. + DefaultHighlightStyle SearchHighlightStyle = "" + + // HTMLHighlightStyle specifies to use HTML tags to highlight search result hits. + HTMLHighlightStyle SearchHighlightStyle = "html" + + // AnsiHightlightStyle specifies to use ANSI tags to highlight search result hits. + AnsiHightlightStyle SearchHighlightStyle = "ansi" +) + +// SearchScanConsistency indicates the level of data consistency desired for a search query. 
+type SearchScanConsistency uint + +const ( + searchScanConsistencyNotSet SearchScanConsistency = iota + + // SearchScanConsistencyNotBounded indicates no data consistency is required. + SearchScanConsistencyNotBounded +) + +// SearchHighlightOptions are the options available for search highlighting. +type SearchHighlightOptions struct { + Style SearchHighlightStyle + Fields []string +} + +// SearchOptions represents a pending search query. +type SearchOptions struct { + ScanConsistency SearchScanConsistency + Limit uint32 + Skip uint32 + Explain bool + Highlight *SearchHighlightOptions + Fields []string + Sort []cbsearch.Sort + Facets map[string]cbsearch.Facet + ConsistentWith *MutationState + + // Raw provides a way to provide extra parameters in the request body for the query. + Raw map[string]interface{} + + Timeout time.Duration + RetryStrategy RetryStrategy + + parentSpan requestSpanContext +} + +func (opts *SearchOptions) toMap() (map[string]interface{}, error) { + data := make(map[string]interface{}) + + if opts.Limit > 0 { + data["size"] = opts.Limit + } + + if opts.Skip > 0 { + data["from"] = opts.Skip + } + + if opts.Explain { + data["explain"] = opts.Explain + } + + if len(opts.Fields) > 0 { + data["fields"] = opts.Fields + } + + if len(opts.Sort) > 0 { + data["sort"] = opts.Sort + } + + if opts.Highlight != nil { + highlight := make(map[string]interface{}) + highlight["style"] = string(opts.Highlight.Style) + highlight["fields"] = opts.Highlight.Fields + data["highlight"] = highlight + } + + if opts.Facets != nil { + facets := make(map[string]interface{}) + for k, v := range opts.Facets { + facets[k] = v + } + data["facets"] = facets + } + + if opts.ScanConsistency != 0 && opts.ConsistentWith != nil { + return nil, makeInvalidArgumentsError("ScanConsistency and ConsistentWith must be used exclusively") + } + + var ctl map[string]interface{} + + if opts.ScanConsistency != searchScanConsistencyNotSet { + consistency := make(map[string]interface{}) + + if 
opts.ScanConsistency == SearchScanConsistencyNotBounded { + consistency["level"] = "not_bounded" + } else { + return nil, makeInvalidArgumentsError("unexpected consistency option") + } + + ctl = map[string]interface{}{"consistency": consistency} + } + + if opts.ConsistentWith != nil { + consistency := make(map[string]interface{}) + + consistency["level"] = "at_plus" + consistency["vectors"] = opts.ConsistentWith.toSearchMutationState() + + if ctl == nil { + ctl = make(map[string]interface{}) + } + ctl["consistency"] = consistency + } + if ctl != nil { + data["ctl"] = ctl + } + + if opts.Raw != nil { + for k, v := range opts.Raw { + data[k] = v + } + } + + return data, nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/subdocspecs.go b/vendor/github.com/couchbase/gocb/v2/subdocspecs.go new file mode 100644 index 000000000000..17119184fbce --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/subdocspecs.go @@ -0,0 +1,327 @@ +package gocb + +import "github.com/couchbase/gocbcore/v9/memd" + +// LookupInSpec is the representation of an operation available when calling LookupIn +type LookupInSpec struct { + op memd.SubDocOpType + path string + isXattr bool +} + +// MutateInSpec is the representation of an operation available when calling MutateIn +type MutateInSpec struct { + op memd.SubDocOpType + createPath bool + isXattr bool + path string + value interface{} + multiValue bool +} + +// GetSpecOptions are the options available to LookupIn subdoc Get operations. +type GetSpecOptions struct { + IsXattr bool +} + +// GetSpec indicates a path to be retrieved from the document. The value of the path +// can later be retrieved from the LookupResult. +// The path syntax follows query's path syntax (e.g. `foo.bar.baz`). 
+func GetSpec(path string, opts *GetSpecOptions) LookupInSpec { + if opts == nil { + opts = &GetSpecOptions{} + } + + return LookupInSpec{ + op: memd.SubDocOpGet, + path: path, + isXattr: opts.IsXattr, + } +} + +// ExistsSpecOptions are the options available to LookupIn subdoc Exists operations. +type ExistsSpecOptions struct { + IsXattr bool +} + +// ExistsSpec is similar to Path(), but does not actually retrieve the value from the server. +// This may save bandwidth if you only need to check for the existence of a +// path (without caring for its content). You can check the status of this +// operation by using .ContentAt (and ignoring the value) or .Exists() on the LookupResult. +func ExistsSpec(path string, opts *ExistsSpecOptions) LookupInSpec { + if opts == nil { + opts = &ExistsSpecOptions{} + } + + return LookupInSpec{ + op: memd.SubDocOpExists, + path: path, + isXattr: opts.IsXattr, + } +} + +// CountSpecOptions are the options available to LookupIn subdoc Count operations. +type CountSpecOptions struct { + IsXattr bool +} + +// CountSpec allows you to retrieve the number of items in an array or keys within an +// dictionary within an element of a document. +func CountSpec(path string, opts *CountSpecOptions) LookupInSpec { + if opts == nil { + opts = &CountSpecOptions{} + } + + return LookupInSpec{ + op: memd.SubDocOpGetCount, + path: path, + isXattr: opts.IsXattr, + } +} + +// InsertSpecOptions are the options available to subdocument Insert operations. +type InsertSpecOptions struct { + CreatePath bool + IsXattr bool +} + +// InsertSpec inserts a value at the specified path within the document. 
+func InsertSpec(path string, val interface{}, opts *InsertSpecOptions) MutateInSpec { + if opts == nil { + opts = &InsertSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpDictAdd, + createPath: opts.CreatePath, + isXattr: opts.IsXattr, + path: path, + value: val, + multiValue: false, + } +} + +// UpsertSpecOptions are the options available to subdocument Upsert operations. +type UpsertSpecOptions struct { + CreatePath bool + IsXattr bool +} + +// UpsertSpec creates a new value at the specified path within the document if it does not exist, if it does exist then it +// updates it. +func UpsertSpec(path string, val interface{}, opts *UpsertSpecOptions) MutateInSpec { + if opts == nil { + opts = &UpsertSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpDictSet, + createPath: opts.CreatePath, + isXattr: opts.IsXattr, + path: path, + value: val, + multiValue: false, + } +} + +// ReplaceSpecOptions are the options available to subdocument Replace operations. +type ReplaceSpecOptions struct { + IsXattr bool +} + +// ReplaceSpec replaces the value of the field at path. +func ReplaceSpec(path string, val interface{}, opts *ReplaceSpecOptions) MutateInSpec { + if opts == nil { + opts = &ReplaceSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpReplace, + createPath: false, + isXattr: opts.IsXattr, + path: path, + value: val, + multiValue: false, + } +} + +// RemoveSpecOptions are the options available to subdocument Remove operations. +type RemoveSpecOptions struct { + IsXattr bool +} + +// RemoveSpec removes the field at path. +func RemoveSpec(path string, opts *RemoveSpecOptions) MutateInSpec { + if opts == nil { + opts = &RemoveSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpDelete, + createPath: false, + isXattr: opts.IsXattr, + path: path, + value: nil, + multiValue: false, + } +} + +// ArrayAppendSpecOptions are the options available to subdocument ArrayAppend operations. 
+type ArrayAppendSpecOptions struct { + CreatePath bool + IsXattr bool + // HasMultiple adds multiple values as elements to an array. + // When used `value` in the spec must be an array type + // ArrayAppend("path", []int{1,2,3,4}, ArrayAppendSpecOptions{HasMultiple:true}) => + // "path" [..., 1,2,3,4] + // + // This is a more efficient version (at both the network and server levels) + // of doing + // spec.ArrayAppend("path", 1, nil) + // spec.ArrayAppend("path", 2, nil) + // spec.ArrayAppend("path", 3, nil) + HasMultiple bool +} + +// ArrayAppendSpec adds an element(s) to the end (i.e. right) of an array +func ArrayAppendSpec(path string, val interface{}, opts *ArrayAppendSpecOptions) MutateInSpec { + if opts == nil { + opts = &ArrayAppendSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpArrayPushLast, + createPath: opts.CreatePath, + isXattr: opts.IsXattr, + path: path, + value: val, + multiValue: opts.HasMultiple, + } +} + +// ArrayPrependSpecOptions are the options available to subdocument ArrayPrepend operations. +type ArrayPrependSpecOptions struct { + CreatePath bool + IsXattr bool + // HasMultiple adds multiple values as elements to an array. + // When used `value` in the spec must be an array type + // ArrayPrepend("path", []int{1,2,3,4}, ArrayPrependSpecOptions{HasMultiple:true}) => + // "path" [1,2,3,4, ....] + // + // This is a more efficient version (at both the network and server levels) + // of doing + // spec.ArrayPrepend("path", 1, nil) + // spec.ArrayPrepend("path", 2, nil) + // spec.ArrayPrepend("path", 3, nil) + HasMultiple bool +} + +// ArrayPrependSpec adds an element to the beginning (i.e. 
left) of an array +func ArrayPrependSpec(path string, val interface{}, opts *ArrayPrependSpecOptions) MutateInSpec { + if opts == nil { + opts = &ArrayPrependSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpArrayPushFirst, + createPath: opts.CreatePath, + isXattr: opts.IsXattr, + path: path, + value: val, + multiValue: opts.HasMultiple, + } +} + +// ArrayInsertSpecOptions are the options available to subdocument ArrayInsert operations. +type ArrayInsertSpecOptions struct { + CreatePath bool + IsXattr bool + // HasMultiple adds multiple values as elements to an array. + // When used `value` in the spec must be an array type + // ArrayInsert("path[1]", []int{1,2,3,4}, ArrayInsertSpecOptions{HasMultiple:true}) => + // "path" [..., 1,2,3,4] + // + // This is a more efficient version (at both the network and server levels) + // of doing + // spec.ArrayInsert("path[2]", 1, nil) + // spec.ArrayInsert("path[3]", 2, nil) + // spec.ArrayInsert("path[4]", 3, nil) + HasMultiple bool +} + +// ArrayInsertSpec inserts an element at a given position within an array. The position should be +// specified as part of the path, e.g. path.to.array[3] +func ArrayInsertSpec(path string, val interface{}, opts *ArrayInsertSpecOptions) MutateInSpec { + if opts == nil { + opts = &ArrayInsertSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpArrayInsert, + createPath: opts.CreatePath, + isXattr: opts.IsXattr, + path: path, + value: val, + multiValue: opts.HasMultiple, + } +} + +// ArrayAddUniqueSpecOptions are the options available to subdocument ArrayAddUnique operations. +type ArrayAddUniqueSpecOptions struct { + CreatePath bool + IsXattr bool +} + +// ArrayAddUniqueSpec adds an dictionary add unique operation to this mutation operation set. 
+func ArrayAddUniqueSpec(path string, val interface{}, opts *ArrayAddUniqueSpecOptions) MutateInSpec { + if opts == nil { + opts = &ArrayAddUniqueSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpArrayAddUnique, + createPath: opts.CreatePath, + isXattr: opts.IsXattr, + path: path, + value: val, + multiValue: false, + } +} + +// CounterSpecOptions are the options available to subdocument Increment and Decrement operations. +type CounterSpecOptions struct { + CreatePath bool + IsXattr bool +} + +// IncrementSpec adds an increment operation to this mutation operation set. +func IncrementSpec(path string, delta int64, opts *CounterSpecOptions) MutateInSpec { + if opts == nil { + opts = &CounterSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpCounter, + createPath: opts.CreatePath, + isXattr: opts.IsXattr, + path: path, + value: delta, + multiValue: false, + } +} + +// DecrementSpec adds a decrement operation to this mutation operation set. +func DecrementSpec(path string, delta int64, opts *CounterSpecOptions) MutateInSpec { + if opts == nil { + opts = &CounterSpecOptions{} + } + + return MutateInSpec{ + op: memd.SubDocOpCounter, + createPath: opts.CreatePath, + isXattr: opts.IsXattr, + path: path, + value: -delta, + multiValue: false, + } +} diff --git a/vendor/github.com/couchbase/gocb/v2/thresholdlogtracer.go b/vendor/github.com/couchbase/gocb/v2/thresholdlogtracer.go new file mode 100644 index 000000000000..c0dfb2c912ba --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/thresholdlogtracer.go @@ -0,0 +1,414 @@ +package gocb + +import ( + "encoding/json" + "sort" + "sync" + "sync/atomic" + "time" +) + +type thresholdLogGroup struct { + name string + floor time.Duration + ops []*thresholdLogSpan + lock sync.RWMutex +} + +func (g *thresholdLogGroup) init(name string, floor time.Duration, size uint32) { + g.name = name + g.floor = floor + g.ops = make([]*thresholdLogSpan, 0, size) +} + +func (g *thresholdLogGroup) recordOp(span 
*thresholdLogSpan) { + if span.duration < g.floor { + return + } + + // Preemptively check that we actually need to be inserted using a read lock first + // this is a performance improvement measure to avoid locking the mutex all the time. + g.lock.RLock() + if len(g.ops) == cap(g.ops) && span.duration < g.ops[0].duration { + // we are at capacity and we are faster than the fastest slow op + g.lock.RUnlock() + return + } + g.lock.RUnlock() + + g.lock.Lock() + if len(g.ops) == cap(g.ops) && span.duration < g.ops[0].duration { + // we are at capacity and we are faster than the fastest slow op + g.lock.Unlock() + return + } + + l := len(g.ops) + i := sort.Search(l, func(i int) bool { return span.duration < g.ops[i].duration }) + + // i represents the slot where it should be inserted + + if len(g.ops) < cap(g.ops) { + if i == l { + g.ops = append(g.ops, span) + } else { + g.ops = append(g.ops, nil) + copy(g.ops[i+1:], g.ops[i:]) + g.ops[i] = span + } + } else { + if i == 0 { + g.ops[i] = span + } else { + copy(g.ops[0:i-1], g.ops[1:i]) + g.ops[i-1] = span + } + } + + g.lock.Unlock() +} + +type thresholdLogItem struct { + OperationName string `json:"operation_name,omitempty"` + TotalTimeUs uint64 `json:"total_us,omitempty"` + EncodeDurationUs uint64 `json:"encode_us,omitempty"` + DispatchDurationUs uint64 `json:"dispatch_us,omitempty"` + ServerDurationUs uint64 `json:"server_us,omitempty"` + LastRemoteAddress string `json:"last_remote_address,omitempty"` + LastLocalAddress string `json:"last_local_address,omitempty"` + LastDispatchDurationUs uint64 `json:"last_dispatch_us,omitempty"` + LastOperationID string `json:"last_operation_id,omitempty"` + LastLocalID string `json:"last_local_id,omitempty"` + DocumentKey string `json:"document_key,omitempty"` +} + +type thresholdLogService struct { + Service string `json:"service"` + Count uint64 `json:"count"` + Top []thresholdLogItem `json:"top"` +} + +func (g *thresholdLogGroup) logRecordedRecords(sampleSize uint32) { + // 
Preallocate space to copy the ops into... + oldOps := make([]*thresholdLogSpan, sampleSize) + + g.lock.Lock() + // Escape early if we have no ops to log... + if len(g.ops) == 0 { + g.lock.Unlock() + return + } + + // Copy out our ops so we can cheaply print them out without blocking + // our ops from actually being recorded in other goroutines (which would + // effectively slow down the op pipeline for logging). + + oldOps = oldOps[0:len(g.ops)] + copy(oldOps, g.ops) + g.ops = g.ops[:0] + + g.lock.Unlock() + + jsonData := thresholdLogService{ + Service: g.name, + } + + for i := len(oldOps) - 1; i >= 0; i-- { + op := oldOps[i] + + jsonData.Top = append(jsonData.Top, thresholdLogItem{ + OperationName: op.opName, + TotalTimeUs: uint64(op.duration / time.Microsecond), + DispatchDurationUs: uint64(op.totalDispatchDuration / time.Microsecond), + ServerDurationUs: uint64(op.totalServerDuration / time.Microsecond), + EncodeDurationUs: uint64(op.totalEncodeDuration / time.Microsecond), + LastRemoteAddress: op.lastDispatchPeer, + LastDispatchDurationUs: uint64(op.lastDispatchDuration / time.Microsecond), + LastOperationID: op.lastOperationID, + LastLocalID: op.lastLocalID, + DocumentKey: op.documentKey, + }) + } + + jsonData.Count = uint64(len(jsonData.Top)) + + jsonBytes, err := json.Marshal(jsonData) + if err != nil { + logDebugf("Failed to generate threshold logging service JSON: %s", err) + } + + logInfof("Threshold Log: %s", jsonBytes) +} + +// ThresholdLoggingOptions is the set of options available for configuring threshold logging. 
+type ThresholdLoggingOptions struct { + ServerDurationDisabled bool + Interval time.Duration + SampleSize uint32 + KVThreshold time.Duration + ViewsThreshold time.Duration + QueryThreshold time.Duration + SearchThreshold time.Duration + AnalyticsThreshold time.Duration + ManagementThreshold time.Duration +} + +// thresholdLoggingTracer is a specialized Tracer implementation which will automatically +// log operations which fall outside of a set of thresholds. Note that this tracer is +// only safe for use within the Couchbase SDK, uses by external event sources are +// likely to fail. +type thresholdLoggingTracer struct { + Interval time.Duration + SampleSize uint32 + KVThreshold time.Duration + ViewsThreshold time.Duration + QueryThreshold time.Duration + SearchThreshold time.Duration + AnalyticsThreshold time.Duration + ManagementThreshold time.Duration + + killCh chan struct{} + refCount int32 + nextTick time.Time + kvGroup thresholdLogGroup + viewsGroup thresholdLogGroup + queryGroup thresholdLogGroup + searchGroup thresholdLogGroup + analyticsGroup thresholdLogGroup + managementGroup thresholdLogGroup +} + +func newThresholdLoggingTracer(opts *ThresholdLoggingOptions) *thresholdLoggingTracer { + if opts == nil { + opts = &ThresholdLoggingOptions{} + } + if opts.Interval == 0 { + opts.Interval = 10 * time.Second + } + if opts.SampleSize == 0 { + opts.SampleSize = 10 + } + if opts.KVThreshold == 0 { + opts.KVThreshold = 500 * time.Millisecond + } + if opts.ViewsThreshold == 0 { + opts.ViewsThreshold = 1 * time.Second + } + if opts.QueryThreshold == 0 { + opts.QueryThreshold = 1 * time.Second + } + if opts.SearchThreshold == 0 { + opts.SearchThreshold = 1 * time.Second + } + if opts.AnalyticsThreshold == 0 { + opts.AnalyticsThreshold = 1 * time.Second + } + if opts.ManagementThreshold == 0 { + opts.ManagementThreshold = 1 * time.Second + } + + t := &thresholdLoggingTracer{ + Interval: opts.Interval, + SampleSize: opts.SampleSize, + KVThreshold: opts.KVThreshold, 
+ ViewsThreshold: opts.ViewsThreshold, + QueryThreshold: opts.QueryThreshold, + SearchThreshold: opts.SearchThreshold, + AnalyticsThreshold: opts.AnalyticsThreshold, + ManagementThreshold: opts.ManagementThreshold, + } + + t.kvGroup.init("kv", t.KVThreshold, t.SampleSize) + t.viewsGroup.init("views", t.ViewsThreshold, t.SampleSize) + t.queryGroup.init("query", t.QueryThreshold, t.SampleSize) + t.searchGroup.init("search", t.SearchThreshold, t.SampleSize) + t.analyticsGroup.init("analytics", t.AnalyticsThreshold, t.SampleSize) + t.managementGroup.init("management", t.ManagementThreshold, t.SampleSize) + + if t.killCh == nil { + t.killCh = make(chan struct{}) + } + + if t.nextTick.IsZero() { + t.nextTick = time.Now().Add(t.Interval) + } + + return t +} + +// AddRef is used internally to keep track of the number of Cluster instances referring to it. +// This is used to correctly shut down the aggregation routines once there are no longer any +// instances tracing to it. +func (t *thresholdLoggingTracer) AddRef() int32 { + newRefCount := atomic.AddInt32(&t.refCount, 1) + if newRefCount == 1 { + t.startLoggerRoutine() + } + return newRefCount +} + +// DecRef is the counterpart to AddRef (see AddRef for more information). 
+func (t *thresholdLoggingTracer) DecRef() int32 { + newRefCount := atomic.AddInt32(&t.refCount, -1) + if newRefCount == 0 { + t.killCh <- struct{}{} + } + return newRefCount +} + +func (t *thresholdLoggingTracer) logRecordedRecords() { + t.kvGroup.logRecordedRecords(t.SampleSize) + t.viewsGroup.logRecordedRecords(t.SampleSize) + t.queryGroup.logRecordedRecords(t.SampleSize) + t.searchGroup.logRecordedRecords(t.SampleSize) + t.analyticsGroup.logRecordedRecords(t.SampleSize) + t.managementGroup.logRecordedRecords(t.SampleSize) +} + +func (t *thresholdLoggingTracer) startLoggerRoutine() { + go t.loggerRoutine() +} + +func (t *thresholdLoggingTracer) loggerRoutine() { + for { + select { + case <-time.After(time.Until(t.nextTick)): + t.nextTick = t.nextTick.Add(t.Interval) + t.logRecordedRecords() + case <-t.killCh: + t.logRecordedRecords() + return + } + } +} + +func (t *thresholdLoggingTracer) recordOp(span *thresholdLogSpan) { + switch span.serviceName { + case "mgmt": + t.managementGroup.recordOp(span) + case "kv": + t.kvGroup.recordOp(span) + case "views": + t.viewsGroup.recordOp(span) + case "query": + t.queryGroup.recordOp(span) + case "search": + t.searchGroup.recordOp(span) + case "analytics": + t.analyticsGroup.recordOp(span) + } +} + +// StartSpan belongs to the Tracer interface. 
+func (t *thresholdLoggingTracer) StartSpan(operationName string, parentContext requestSpanContext) requestSpan { + span := &thresholdLogSpan{ + tracer: t, + opName: operationName, + startTime: time.Now(), + } + + if context, ok := parentContext.(*thresholdLogSpanContext); ok { + span.parent = context.span + } + + return span +} + +type thresholdLogSpan struct { + tracer *thresholdLoggingTracer + parent *thresholdLogSpan + opName string + startTime time.Time + serviceName string + peerAddress string + serverDuration time.Duration + duration time.Duration + totalServerDuration time.Duration + totalDispatchDuration time.Duration + totalEncodeDuration time.Duration + lastDispatchPeer string + lastDispatchDuration time.Duration + lastOperationID string + lastLocalID string + documentKey string + lock sync.Mutex +} + +func (n *thresholdLogSpan) Context() requestSpanContext { + return &thresholdLogSpanContext{n} +} + +func (n *thresholdLogSpan) SetTag(key string, value interface{}) requestSpan { + var ok bool + + switch key { + case "server_duration": + if n.serverDuration, ok = value.(time.Duration); !ok { + logDebugf("Failed to cast span server_duration tag") + } + case "couchbase.service": + if n.serviceName, ok = value.(string); !ok { + logDebugf("Failed to cast span couchbase.service tag") + } + case "peer.address": + if n.peerAddress, ok = value.(string); !ok { + logDebugf("Failed to cast span peer.address tag") + } + case "couchbase.operation_id": + if n.lastOperationID, ok = value.(string); !ok { + logDebugf("Failed to cast span couchbase.operation_id tag") + } + case "couchbase.document_key": + if n.documentKey, ok = value.(string); !ok { + logDebugf("Failed to cast span couchbase.document_key tag") + } + case "couchbase.local_id": + if n.lastLocalID, ok = value.(string); !ok { + logDebugf("Failed to cast span couchbase.local_id tag") + } + } + return n +} + +func (n *thresholdLogSpan) Finish() { + n.duration = time.Since(n.startTime) + + n.totalServerDuration 
+= n.serverDuration + if n.opName == "dispatch" { + n.totalDispatchDuration += n.duration + n.lastDispatchPeer = n.peerAddress + n.lastDispatchDuration = n.duration + } + if n.opName == "encode" { + n.totalEncodeDuration += n.duration + } + + if n.parent != nil { + n.parent.lock.Lock() + n.parent.totalServerDuration += n.totalServerDuration + n.parent.totalDispatchDuration += n.totalDispatchDuration + n.parent.totalEncodeDuration += n.totalEncodeDuration + if n.lastDispatchPeer != "" || n.lastDispatchDuration > 0 { + n.parent.lastDispatchPeer = n.lastDispatchPeer + n.parent.lastDispatchDuration = n.lastDispatchDuration + } + if n.lastOperationID != "" { + n.parent.lastOperationID = n.lastOperationID + } + if n.lastLocalID != "" { + n.parent.lastLocalID = n.lastLocalID + } + if n.documentKey != "" { + n.parent.documentKey = n.documentKey + } + n.parent.lock.Unlock() + } + + if n.serviceName != "" { + n.tracer.recordOp(n) + } +} + +type thresholdLogSpanContext struct { + span *thresholdLogSpan +} diff --git a/vendor/github.com/couchbase/gocb/v2/token.go b/vendor/github.com/couchbase/gocb/v2/token.go new file mode 100644 index 000000000000..6d7b2cc744e3 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/token.go @@ -0,0 +1,183 @@ +package gocb + +import ( + "encoding/json" + "fmt" + "strconv" + + gocbcore "github.com/couchbase/gocbcore/v9" +) + +// MutationToken holds the mutation state information from an operation. +type MutationToken struct { + token gocbcore.MutationToken + bucketName string +} + +type bucketToken struct { + SeqNo uint64 `json:"seqno"` + VbUUID string `json:"vbuuid"` +} + +// BucketName returns the name of the bucket that this token belongs to. +func (mt MutationToken) BucketName() string { + return mt.bucketName +} + +// PartitionUUID returns the UUID of the vbucket that this token belongs to. 
+func (mt MutationToken) PartitionUUID() uint64 { + return uint64(mt.token.VbUUID) +} + +// PartitionID returns the ID of the vbucket that this token belongs to. +func (mt MutationToken) PartitionID() uint64 { + return uint64(mt.token.VbID) +} + +// SequenceNumber returns the sequence number of the vbucket that this token belongs to. +func (mt MutationToken) SequenceNumber() uint64 { + return uint64(mt.token.SeqNo) +} + +func (mt bucketToken) MarshalJSON() ([]byte, error) { + info := []interface{}{mt.SeqNo, mt.VbUUID} + return json.Marshal(info) +} + +func (mt *bucketToken) UnmarshalJSON(data []byte) error { + info := []interface{}{&mt.SeqNo, &mt.VbUUID} + return json.Unmarshal(data, &info) +} + +type bucketTokens map[string]*bucketToken +type mutationStateData map[string]*bucketTokens + +type searchMutationState map[string]map[string]uint64 + +// MutationState holds and aggregates MutationToken's across multiple operations. +type MutationState struct { + tokens []MutationToken +} + +// NewMutationState creates a new MutationState for tracking mutation state. +func NewMutationState(tokens ...MutationToken) *MutationState { + mt := &MutationState{} + mt.Add(tokens...) + return mt +} + +// Add includes an operation's mutation information in this mutation state. +func (mt *MutationState) Add(tokens ...MutationToken) { + for _, token := range tokens { + if token.bucketName != "" { + mt.tokens = append(mt.tokens, token) + } + } +} + +// MutationStateInternal specifies internal operations. +// Internal: This should never be used and is not supported. +type MutationStateInternal struct { + mt *MutationState +} + +// Internal return a new MutationStateInternal. +// Internal: This should never be used and is not supported. +func (mt *MutationState) Internal() *MutationStateInternal { + return &MutationStateInternal{ + mt: mt, + } +} + +// Add includes an operation's mutation information in this mutation state. 
+func (mti *MutationStateInternal) Add(bucket string, tokens ...gocbcore.MutationToken) { + for _, token := range tokens { + mti.mt.Add(MutationToken{ + bucketName: bucket, + token: token, + }) + } +} + +// Tokens returns the tokens belonging to the mutation state. +func (mti *MutationStateInternal) Tokens() []MutationToken { + return mti.mt.tokens +} + +// MarshalJSON marshal's this mutation state to JSON. +func (mt *MutationState) MarshalJSON() ([]byte, error) { + var data mutationStateData + for _, token := range mt.tokens { + if data == nil { + data = make(mutationStateData) + } + + bucketName := token.bucketName + if (data)[bucketName] == nil { + tokens := make(bucketTokens) + (data)[bucketName] = &tokens + } + + vbID := fmt.Sprintf("%d", token.token.VbID) + stateToken := (*(data)[bucketName])[vbID] + if stateToken == nil { + stateToken = &bucketToken{} + (*(data)[bucketName])[vbID] = stateToken + } + + stateToken.SeqNo = uint64(token.token.SeqNo) + stateToken.VbUUID = fmt.Sprintf("%d", token.token.VbUUID) + + } + + return json.Marshal(data) +} + +// UnmarshalJSON unmarshal's a mutation state from JSON. +func (mt *MutationState) UnmarshalJSON(data []byte) error { + var stateData mutationStateData + err := json.Unmarshal(data, &stateData) + if err != nil { + return err + } + + for bucketName, bTokens := range stateData { + for vbIDStr, stateToken := range *bTokens { + vbID, err := strconv.Atoi(vbIDStr) + if err != nil { + return err + } + vbUUID, err := strconv.Atoi(stateToken.VbUUID) + if err != nil { + return err + } + token := MutationToken{ + bucketName: bucketName, + token: gocbcore.MutationToken{ + VbID: uint16(vbID), + VbUUID: gocbcore.VbUUID(vbUUID), + SeqNo: gocbcore.SeqNo(stateToken.SeqNo), + }, + } + + mt.tokens = append(mt.tokens, token) + } + } + + return nil +} + +// toSearchMutationState is specific to search, search doesn't accept tokens in the same format as other services. 
+func (mt *MutationState) toSearchMutationState() searchMutationState {
+	data := make(searchMutationState)
+	for _, token := range mt.tokens {
+		_, ok := data[token.bucketName]
+		if !ok {
+			data[token.bucketName] = make(map[string]uint64)
+		}
+
+		data[token.bucketName][fmt.Sprintf("%d/%d", token.token.VbID, token.token.VbUUID)] = uint64(token.token.SeqNo)
+	}
+
+	return data
+}
diff --git a/vendor/github.com/couchbase/gocb/v2/tracing.go b/vendor/github.com/couchbase/gocb/v2/tracing.go
new file mode 100644
index 000000000000..78354fd3ece5
--- /dev/null
+++ b/vendor/github.com/couchbase/gocb/v2/tracing.go
@@ -0,0 +1,97 @@
+package gocb
+
+import (
+	"github.com/couchbase/gocbcore/v9"
+)
+
+func tracerAddRef(tracer requestTracer) {
+	if tracer == nil {
+		return
+	}
+	if refTracer, ok := tracer.(interface {
+		AddRef() int32
+	}); ok {
+		refTracer.AddRef()
+	}
+}
+
+func tracerDecRef(tracer requestTracer) {
+	if tracer == nil {
+		return
+	}
+	if refTracer, ok := tracer.(interface {
+		DecRef() int32
+	}); ok {
+		refTracer.DecRef()
+	}
+}
+
+// requestTracer describes the tracing abstraction in the SDK.
+type requestTracer interface {
+	StartSpan(operationName string, parentContext requestSpanContext) requestSpan
+}
+
+// requestSpan is the interface for spans that are created by a requestTracer.
+type requestSpan interface {
+	Finish()
+	Context() requestSpanContext
+	SetTag(key string, value interface{}) requestSpan
+}
+
+// requestSpanContext is the interface for external span contexts that can be passed into the SDK option blocks.
+type requestSpanContext interface {
+}
+
+type requestTracerWrapper struct {
+	tracer requestTracer
+}
+
+func (tracer *requestTracerWrapper) StartSpan(operationName string, parentContext gocbcore.RequestSpanContext) gocbcore.RequestSpan {
+	return requestSpanWrapper{
+		span: tracer.tracer.StartSpan(operationName, parentContext),
+	}
+}
+
+type requestSpanWrapper struct {
+	span requestSpan
+}
+
+func (span requestSpanWrapper) Finish() {
+	span.span.Finish()
+}
+
+func (span requestSpanWrapper) Context() gocbcore.RequestSpanContext {
+	return span.span.Context()
+}
+
+func (span requestSpanWrapper) SetTag(key string, value interface{}) gocbcore.RequestSpan {
+	span.span = span.span.SetTag(key, value)
+	return span
+}
+
+type noopSpan struct{}
+type noopSpanContext struct{}
+
+var (
+	defaultNoopSpanContext = noopSpanContext{}
+	defaultNoopSpan        = noopSpan{}
+)
+
+// noopTracer will have a future use so we tell the linter not to flag it.
+type noopTracer struct { // nolint: unused
+}
+
+func (tracer *noopTracer) StartSpan(operationName string, parentContext requestSpanContext) requestSpan {
+	return defaultNoopSpan
+}
+
+func (span noopSpan) Finish() {
+}
+
+func (span noopSpan) Context() requestSpanContext {
+	return defaultNoopSpanContext
+}
+
+func (span noopSpan) SetTag(key string, value interface{}) requestSpan {
+	return defaultNoopSpan
+}
diff --git a/vendor/github.com/couchbase/gocb/v2/transcoding.go b/vendor/github.com/couchbase/gocb/v2/transcoding.go
new file mode 100644
index 000000000000..702a50591057
--- /dev/null
+++ b/vendor/github.com/couchbase/gocb/v2/transcoding.go
@@ -0,0 +1,398 @@
+package gocb
+
+import (
+	"encoding/json"
+
+	gocbcore "github.com/couchbase/gocbcore/v9"
+	"github.com/pkg/errors"
+)
+
+// Transcoder provides an interface for transforming Go values to and
+// from raw bytes for storage and retrieval from Couchbase data storage.
+type Transcoder interface {
+	// Decodes retrieved bytes into a Go type.
+ Decode([]byte, uint32, interface{}) error + + // Encodes a Go type into bytes for storage. + Encode(interface{}) ([]byte, uint32, error) +} + +// JSONTranscoder implements the default transcoding behavior and applies JSON transcoding to all values. +// +// This will apply the following behavior to the value: +// binary ([]byte) -> error. +// default -> JSON value, JSON Flags. +type JSONTranscoder struct { +} + +// NewJSONTranscoder returns a new JSONTranscoder. +func NewJSONTranscoder() *JSONTranscoder { + return &JSONTranscoder{} +} + +// Decode applies JSON transcoding behaviour to decode into a Go type. +func (t *JSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error { + valueType, compression := gocbcore.DecodeCommonFlags(flags) + + // Make sure compression is disabled + if compression != gocbcore.NoCompression { + return errors.New("unexpected value compression") + } + + // Normal types of decoding + if valueType == gocbcore.BinaryType { + return errors.New("binary datatype is not supported by JSONTranscoder") + } else if valueType == gocbcore.StringType { + return errors.New("string datatype is not supported by JSONTranscoder") + } else if valueType == gocbcore.JSONType { + err := json.Unmarshal(bytes, &out) + if err != nil { + return err + } + return nil + } + + return errors.New("unexpected expectedFlags value") +} + +// Encode applies JSON transcoding behaviour to encode a Go type. 
+func (t *JSONTranscoder) Encode(value interface{}) ([]byte, uint32, error) { + var bytes []byte + var flags uint32 + var err error + + switch typeValue := value.(type) { + case []byte: + return nil, 0, errors.New("binary data is not supported by JSONTranscoder") + case *[]byte: + return nil, 0, errors.New("binary data is not supported by JSONTranscoder") + case json.RawMessage: + bytes = typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case *json.RawMessage: + bytes = *typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case *interface{}: + return t.Encode(*typeValue) + default: + bytes, err = json.Marshal(value) + if err != nil { + return nil, 0, err + } + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + } + + // No compression supported currently + + return bytes, flags, nil +} + +// RawJSONTranscoder implements passthrough behavior of JSON data. This transcoder does not apply any serialization. +// It will forward data across the network without incurring unnecessary parsing costs. +// +// This will apply the following behavior to the value: +// binary ([]byte) -> JSON bytes, JSON expectedFlags. +// string -> JSON bytes, JSON expectedFlags. +// default -> error. +type RawJSONTranscoder struct { +} + +// NewRawJSONTranscoder returns a new RawJSONTranscoder. +func NewRawJSONTranscoder() *RawJSONTranscoder { + return &RawJSONTranscoder{} +} + +// Decode applies raw JSON transcoding behaviour to decode into a Go type. 
+func (t *RawJSONTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error { + valueType, compression := gocbcore.DecodeCommonFlags(flags) + + // Make sure compression is disabled + if compression != gocbcore.NoCompression { + return errors.New("unexpected value compression") + } + + // Normal types of decoding + if valueType == gocbcore.BinaryType { + return errors.New("binary datatype is not supported by RawJSONTranscoder") + } else if valueType == gocbcore.StringType { + return errors.New("string datatype is not supported by RawJSONTranscoder") + } else if valueType == gocbcore.JSONType { + switch typedOut := out.(type) { + case *[]byte: + *typedOut = bytes + return nil + case *string: + *typedOut = string(bytes) + return nil + default: + return errors.New("you must encode raw JSON data in a byte array or string") + } + } + + return errors.New("unexpected expectedFlags value") +} + +// Encode applies raw JSON transcoding behaviour to encode a Go type. +func (t *RawJSONTranscoder) Encode(value interface{}) ([]byte, uint32, error) { + var bytes []byte + var flags uint32 + + switch typeValue := value.(type) { + case []byte: + bytes = typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case *[]byte: + bytes = *typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case string: + bytes = []byte(typeValue) + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case *string: + bytes = []byte(*typeValue) + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case json.RawMessage: + bytes = typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case *json.RawMessage: + bytes = *typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case *interface{}: + return t.Encode(*typeValue) + default: + return nil, 0, makeInvalidArgumentsError("only binary and string 
data is supported by RawJSONTranscoder") + } + + // No compression supported currently + + return bytes, flags, nil +} + +// RawStringTranscoder implements passthrough behavior of raw string data. This transcoder does not apply any serialization. +// +// This will apply the following behavior to the value: +// string -> string bytes, string expectedFlags. +// default -> error. +type RawStringTranscoder struct { +} + +// NewRawStringTranscoder returns a new RawStringTranscoder. +func NewRawStringTranscoder() *RawStringTranscoder { + return &RawStringTranscoder{} +} + +// Decode applies raw string transcoding behaviour to decode into a Go type. +func (t *RawStringTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error { + valueType, compression := gocbcore.DecodeCommonFlags(flags) + + // Make sure compression is disabled + if compression != gocbcore.NoCompression { + return errors.New("unexpected value compression") + } + + // Normal types of decoding + if valueType == gocbcore.BinaryType { + return errors.New("only string datatype is supported by RawStringTranscoder") + } else if valueType == gocbcore.StringType { + switch typedOut := out.(type) { + case *string: + *typedOut = string(bytes) + return nil + case *interface{}: + *typedOut = string(bytes) + return nil + default: + return errors.New("you must encode a string in a string or interface") + } + } else if valueType == gocbcore.JSONType { + return errors.New("only string datatype is supported by RawStringTranscoder") + } + + return errors.New("unexpected expectedFlags value") +} + +// Encode applies raw string transcoding behaviour to encode a Go type. 
+func (t *RawStringTranscoder) Encode(value interface{}) ([]byte, uint32, error) { + var bytes []byte + var flags uint32 + + switch typeValue := value.(type) { + case string: + bytes = []byte(typeValue) + flags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression) + case *string: + bytes = []byte(*typeValue) + flags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression) + case *interface{}: + return t.Encode(*typeValue) + default: + return nil, 0, makeInvalidArgumentsError("only raw string data is supported by RawStringTranscoder") + } + + // No compression supported currently + + return bytes, flags, nil +} + +// RawBinaryTranscoder implements passthrough behavior of raw binary data. This transcoder does not apply any serialization. +// +// This will apply the following behavior to the value: +// binary ([]byte) -> binary bytes, binary expectedFlags. +// default -> error. +type RawBinaryTranscoder struct { +} + +// NewRawBinaryTranscoder returns a new RawBinaryTranscoder. +func NewRawBinaryTranscoder() *RawBinaryTranscoder { + return &RawBinaryTranscoder{} +} + +// Decode applies raw binary transcoding behaviour to decode into a Go type. 
+func (t *RawBinaryTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error { + valueType, compression := gocbcore.DecodeCommonFlags(flags) + + // Make sure compression is disabled + if compression != gocbcore.NoCompression { + return errors.New("unexpected value compression") + } + + // Normal types of decoding + if valueType == gocbcore.BinaryType { + switch typedOut := out.(type) { + case *[]byte: + *typedOut = bytes + return nil + case *interface{}: + *typedOut = bytes + return nil + default: + return errors.New("you must encode binary in a byte array or interface") + } + } else if valueType == gocbcore.StringType { + return errors.New("only binary datatype is supported by RawBinaryTranscoder") + } else if valueType == gocbcore.JSONType { + return errors.New("only binary datatype is supported by RawBinaryTranscoder") + } + + return errors.New("unexpected expectedFlags value") +} + +// Encode applies raw binary transcoding behaviour to encode a Go type. +func (t *RawBinaryTranscoder) Encode(value interface{}) ([]byte, uint32, error) { + var bytes []byte + var flags uint32 + + switch typeValue := value.(type) { + case []byte: + bytes = typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression) + case *[]byte: + bytes = *typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression) + case *interface{}: + return t.Encode(*typeValue) + default: + return nil, 0, makeInvalidArgumentsError("only raw binary data is supported by RawBinaryTranscoder") + } + + // No compression supported currently + + return bytes, flags, nil +} + +// LegacyTranscoder implements the behaviour for a backward-compatible transcoder. This transcoder implements +// behaviour matching that of gocb v1. +// +// This will apply the following behavior to the value: +// binary ([]byte) -> binary bytes, Binary expectedFlags. +// string -> string bytes, String expectedFlags. +// default -> JSON value, JSON expectedFlags. 
+type LegacyTranscoder struct { +} + +// NewLegacyTranscoder returns a new LegacyTranscoder. +func NewLegacyTranscoder() *LegacyTranscoder { + return &LegacyTranscoder{} +} + +// Decode applies legacy transcoding behaviour to decode into a Go type. +func (t *LegacyTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error { + valueType, compression := gocbcore.DecodeCommonFlags(flags) + + // Make sure compression is disabled + if compression != gocbcore.NoCompression { + return errors.New("unexpected value compression") + } + + // Normal types of decoding + if valueType == gocbcore.BinaryType { + switch typedOut := out.(type) { + case *[]byte: + *typedOut = bytes + return nil + case *interface{}: + *typedOut = bytes + return nil + default: + return errors.New("you must encode binary in a byte array or interface") + } + } else if valueType == gocbcore.StringType { + switch typedOut := out.(type) { + case *string: + *typedOut = string(bytes) + return nil + case *interface{}: + *typedOut = string(bytes) + return nil + default: + return errors.New("you must encode a string in a string or interface") + } + } else if valueType == gocbcore.JSONType { + err := json.Unmarshal(bytes, &out) + if err != nil { + return err + } + return nil + } + + return errors.New("unexpected expectedFlags value") +} + +// Encode applies legacy transcoding behavior to encode a Go type. 
+func (t *LegacyTranscoder) Encode(value interface{}) ([]byte, uint32, error) { + var bytes []byte + var flags uint32 + var err error + + switch typeValue := value.(type) { + case []byte: + bytes = typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression) + case *[]byte: + bytes = *typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.BinaryType, gocbcore.NoCompression) + case string: + bytes = []byte(typeValue) + flags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression) + case *string: + bytes = []byte(*typeValue) + flags = gocbcore.EncodeCommonFlags(gocbcore.StringType, gocbcore.NoCompression) + case json.RawMessage: + bytes = typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case *json.RawMessage: + bytes = *typeValue + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + case *interface{}: + return t.Encode(*typeValue) + default: + bytes, err = json.Marshal(value) + if err != nil { + return nil, 0, err + } + flags = gocbcore.EncodeCommonFlags(gocbcore.JSONType, gocbcore.NoCompression) + } + + // No compression supported currently + + return bytes, flags, nil +} diff --git a/vendor/github.com/couchbase/gocb/v2/version.go b/vendor/github.com/couchbase/gocb/v2/version.go new file mode 100644 index 000000000000..aa00b67a5d18 --- /dev/null +++ b/vendor/github.com/couchbase/gocb/v2/version.go @@ -0,0 +1,11 @@ +package gocb + +// Version returns a string representation of the current SDK version. +func Version() string { + return goCbVersionStr +} + +// Identifier returns a string representation of the current SDK identifier. 
+func Identifier() string {
+	return "gocb/" + goCbVersionStr
+}
diff --git a/vendor/github.com/couchbase/gocb/v2/viewquery_options.go b/vendor/github.com/couchbase/gocb/v2/viewquery_options.go
new file mode 100644
index 000000000000..581e4c55cdea
--- /dev/null
+++ b/vendor/github.com/couchbase/gocb/v2/viewquery_options.go
@@ -0,0 +1,211 @@
+package gocb
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// ViewScanConsistency specifies the consistency required for a view query.
+type ViewScanConsistency uint
+
+const (
+	// ViewScanConsistencyNotBounded indicates that no special behaviour should be used.
+	ViewScanConsistencyNotBounded ViewScanConsistency = iota + 1
+	// ViewScanConsistencyRequestPlus indicates to update the index before querying it.
+	ViewScanConsistencyRequestPlus
+	// ViewScanConsistencyUpdateAfter indicates to update the index asynchronously after querying.
+	ViewScanConsistencyUpdateAfter
+)
+
+// ViewOrdering specifies the ordering for the view queries results.
+type ViewOrdering uint
+
+const (
+	// ViewOrderingAscending indicates the query results should be sorted from lowest to highest.
+	ViewOrderingAscending ViewOrdering = iota + 1
+	// ViewOrderingDescending indicates the query results should be sorted from highest to lowest.
+	ViewOrderingDescending
+)
+
+// ViewErrorMode specifies the behaviour of the query engine should an error occur during the gathering of
+// view index results which would result in only partial results being available.
+type ViewErrorMode uint
+
+const (
+	// ViewErrorModeContinue indicates to continue gathering results on error.
+	ViewErrorModeContinue ViewErrorMode = iota + 1
+
+	// ViewErrorModeStop indicates to stop gathering results on error
+	ViewErrorModeStop
+)
+
+// ViewOptions represents the options available when executing view query.
+type ViewOptions struct { + ScanConsistency ViewScanConsistency + Skip uint32 + Limit uint32 + Order ViewOrdering + Reduce bool + Group bool + GroupLevel uint32 + Key interface{} + Keys []interface{} + StartKey interface{} + EndKey interface{} + InclusiveEnd bool + StartKeyDocID string + EndKeyDocID string + OnError ViewErrorMode + Debug bool + + // Raw provides a way to provide extra parameters in the request body for the query. + Raw map[string]string + + Namespace DesignDocumentNamespace + + Timeout time.Duration + RetryStrategy RetryStrategy + + parentSpan requestSpanContext +} + +func (opts *ViewOptions) toURLValues() (*url.Values, error) { + options := &url.Values{} + + if opts.ScanConsistency != 0 { + if opts.ScanConsistency == ViewScanConsistencyRequestPlus { + options.Set("stale", "false") + } else if opts.ScanConsistency == ViewScanConsistencyNotBounded { + options.Set("stale", "ok") + } else if opts.ScanConsistency == ViewScanConsistencyUpdateAfter { + options.Set("stale", "update_after") + } else { + return nil, makeInvalidArgumentsError("unexpected stale option") + } + } + + if opts.Skip != 0 { + options.Set("skip", strconv.FormatUint(uint64(opts.Skip), 10)) + } + + if opts.Limit != 0 { + options.Set("limit", strconv.FormatUint(uint64(opts.Limit), 10)) + } + + if opts.Order != 0 { + if opts.Order == ViewOrderingAscending { + options.Set("descending", "false") + } else if opts.Order == ViewOrderingDescending { + options.Set("descending", "true") + } else { + return nil, makeInvalidArgumentsError("unexpected order option") + } + } + + options.Set("reduce", "false") // is this line necessary? + if opts.Reduce { + options.Set("reduce", "true") + + // Only set group if a reduce view + options.Set("group", "false") // is this line necessary? 
+ if opts.Group { + options.Set("group", "true") + } + + if opts.GroupLevel != 0 { + options.Set("group_level", strconv.FormatUint(uint64(opts.GroupLevel), 10)) + } + } + + if opts.Key != nil { + jsonKey, err := opts.marshalJSON(opts.Key) + if err != nil { + return nil, err + } + options.Set("key", string(jsonKey)) + } + + if len(opts.Keys) > 0 { + jsonKeys, err := opts.marshalJSON(opts.Keys) + if err != nil { + return nil, err + } + options.Set("keys", string(jsonKeys)) + } + + if opts.StartKey != nil { + jsonStartKey, err := opts.marshalJSON(opts.StartKey) + if err != nil { + return nil, err + } + options.Set("startkey", string(jsonStartKey)) + } else { + options.Del("startkey") + } + + if opts.EndKey != nil { + jsonEndKey, err := opts.marshalJSON(opts.EndKey) + if err != nil { + return nil, err + } + options.Set("endkey", string(jsonEndKey)) + } else { + options.Del("endkey") + } + + if opts.StartKey != nil || opts.EndKey != nil { + if opts.InclusiveEnd { + options.Set("inclusive_end", "true") + } else { + options.Set("inclusive_end", "false") + } + } + + if opts.StartKeyDocID == "" { + options.Del("startkey_docid") + } else { + options.Set("startkey_docid", opts.StartKeyDocID) + } + + if opts.EndKeyDocID == "" { + options.Del("endkey_docid") + } else { + options.Set("endkey_docid", opts.EndKeyDocID) + } + + if opts.OnError > 0 { + if opts.OnError == ViewErrorModeContinue { + options.Set("on_error", "continue") + } else if opts.OnError == ViewErrorModeStop { + options.Set("on_error", "stop") + } else { + return nil, makeInvalidArgumentsError("unexpected onerror option") + } + } + + if opts.Debug { + options.Set("debug", "true") + } + + if opts.Raw != nil { + for k, v := range opts.Raw { + options.Set(k, v) + } + } + + return options, nil +} + +func (opts *ViewOptions) marshalJSON(value interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(value) + if err != nil { + return nil, err + 
} + return buf.Bytes(), nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/.golangci.yml b/vendor/github.com/couchbase/gocbcore/v9/.golangci.yml new file mode 100644 index 000000000000..e8f774aecf01 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/.golangci.yml @@ -0,0 +1,18 @@ +run: + modules-download-mode: readonly + tests: false + skip-files: + - logging.go # Logging has some utility functions that are useful to have around which get flagged up +linters: + enable: + - bodyclose + - golint + - gosec + - unconvert +linters-settings: + golint: + set-exit-status: true + min-confidence: 0.81 + errcheck: + check-type-assertions: true + check-blank: true diff --git a/vendor/github.com/couchbase/gocbcore/v9/LICENSE b/vendor/github.com/couchbase/gocbcore/v9/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/couchbase/gocbcore/v9/Makefile b/vendor/github.com/couchbase/gocbcore/v9/Makefile new file mode 100644 index 000000000000..2dcc204b3086 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/Makefile @@ -0,0 +1,24 @@ +devsetup: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + go get github.com/vektra/mockery/.../ + +test: + go test ./... +fasttest: + go test -short ./... + +cover: + go test -coverprofile=cover.out ./... + +lint: + golangci-lint run -v + +check: lint + go test -cover -race ./... + +updatemocks: + mockery -name dispatcher -output . -testonly -inpkg + mockery -name tracerManager -output . -testonly -inpkg + mockery -name configManager -output . -testonly -inpkg + +.PHONY: all test devsetup fasttest lint cover checkerrs checkfmt checkvet checkiea checkspell check updatemocks diff --git a/vendor/github.com/couchbase/gocbcore/v9/README.md b/vendor/github.com/couchbase/gocbcore/v9/README.md new file mode 100644 index 000000000000..4852465eb5bc --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/README.md @@ -0,0 +1,22 @@ +# Couchbase Go Core + +This package provides the underlying Couchbase IO for the gocb project. +If you are looking for the Couchbase Go SDK, you are probably looking for +[gocb](https://github.com/couchbase/gocb). 
+ + +## Branching Strategy +The gocbcore library maintains a branch for each previous major revision +of its API. These branches are introduced just prior to any API breaking +changes. Active work is performed on the master branch, with releases +being performed as tags. Work made on master which are not yet part of a +tagged released should be considered liable to change. + +## License +Copyright 2017 Couchbase Inc. + +Licensed under the Apache License, Version 2.0. + +See +[LICENSE](https://github.com/couchbase/gocbcore/blob/master/LICENSE) +for further details. diff --git a/vendor/github.com/couchbase/gocbcore/v9/agent.go b/vendor/github.com/couchbase/gocbcore/v9/agent.go new file mode 100644 index 000000000000..23c3a705fe4f --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/agent.go @@ -0,0 +1,572 @@ +// Package gocbcore implements methods for low-level communication +// with a Couchbase Server cluster. +package gocbcore + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net" + "net/http" + "sync/atomic" + "time" +) + +// Agent represents the base client handling connections to a Couchbase Server. +// This is used internally by the higher level classes for communicating with the cluster, +// it can also be used to perform more advanced operations with a cluster. +type Agent struct { + clientID string + bucketName string + tlsConfig *dynTLSConfig + initFn memdInitFunc + defaultRetryStrategy RetryStrategy + + pollerController *pollerController + kvMux *kvMux + httpMux *httpMux + + cfgManager *configManagementComponent + errMap *errMapComponent + collections *collectionsComponent + tracer *tracerComponent + http *httpComponent + diagnostics *diagnosticsComponent + crud *crudComponent + observe *observeComponent + stats *statsComponent + n1ql *n1qlQueryComponent + analytics *analyticsQueryComponent + search *searchQueryComponent + views *viewQueryComponent + zombieLogger *zombieLoggerComponent +} + +// !!!!UNSURE WHY THESE EXIST!!!! 
+// ServerConnectTimeout gets the timeout for each server connection, including all authentication steps. +// func (agent *Agent) ServerConnectTimeout() time.Duration { +// return agent.kvConnectTimeout +// } +// +// // SetServerConnectTimeout sets the timeout for each server connection. +// func (agent *Agent) SetServerConnectTimeout(timeout time.Duration) { +// agent.kvConnectTimeout = timeout +// } + +// HTTPClient returns a pre-configured HTTP Client for communicating with +// Couchbase Server. You must still specify authentication information +// for any dispatched requests. +func (agent *Agent) HTTPClient() *http.Client { + return agent.http.cli +} + +// AuthFunc is invoked by the agent to authenticate a client. This function returns two channels to allow for for multi-stage +// authentication processes (such as SCRAM). The continue callback should be called when further asynchronous bootstrapping +// requests (such as select bucket) can be sent. The completed callback should be called when authentication is completed, +// or failed. It should contain any error that occurred. If completed is called before continue then continue will be called +// first internally, the success value will be determined by whether or not an error is present. +type AuthFunc func(client AuthClient, deadline time.Time, continueCb func(), completedCb func(error)) error + +// authFunc wraps AuthFunc to provide a better to the user. +type authFunc func() (completedCh chan BytesAndError, continueCh chan bool, err error) + +type authFuncHandler func(client AuthClient, deadline time.Time, mechanism AuthMechanism) authFunc + +// CreateAgent creates an agent for performing normal operations. 
+func CreateAgent(config *AgentConfig) (*Agent, error) { + initFn := func(client *memdClient, deadline time.Time) error { + return nil + } + + return createAgent(config, initFn) +} + +func createAgent(config *AgentConfig, initFn memdInitFunc) (*Agent, error) { + logInfof("SDK Version: gocbcore/%s", goCbCoreVersionStr) + logInfof("Creating new agent: %+v", config) + + var tlsConfig *dynTLSConfig + if config.UseTLS { + tlsConfig = createTLSConfig(config.Auth, config.TLSRootCAProvider) + } + + httpIdleConnTimeout := 4500 * time.Millisecond + if config.HTTPIdleConnectionTimeout > 0 { + httpIdleConnTimeout = config.HTTPIdleConnectionTimeout + } + + httpCli := createHTTPClient(config.HTTPMaxIdleConns, config.HTTPMaxIdleConnsPerHost, + httpIdleConnTimeout, tlsConfig) + + tracer := config.Tracer + if tracer == nil { + tracer = noopTracer{} + } + tracerCmpt := newTracerComponent(tracer, config.BucketName, config.NoRootTraceSpans) + + c := &Agent{ + clientID: formatCbUID(randomCbUID()), + bucketName: config.BucketName, + tlsConfig: tlsConfig, + initFn: initFn, + tracer: tracerCmpt, + + defaultRetryStrategy: config.DefaultRetryStrategy, + + errMap: newErrMapManager(config.BucketName), + } + + circuitBreakerConfig := config.CircuitBreakerConfig + auth := config.Auth + userAgent := config.UserAgent + useMutationTokens := config.UseMutationTokens + disableDecompression := config.DisableDecompression + useCompression := config.UseCompression + useCollections := config.UseCollections + compressionMinSize := 32 + compressionMinRatio := 0.83 + useDurations := config.UseDurations + useOutOfOrder := config.UseOutOfOrderResponses + + kvConnectTimeout := 7000 * time.Millisecond + if config.KVConnectTimeout > 0 { + kvConnectTimeout = config.KVConnectTimeout + } + + serverWaitTimeout := 5 * time.Second + + kvPoolSize := 1 + if config.KvPoolSize > 0 { + kvPoolSize = config.KvPoolSize + } + + maxQueueSize := 2048 + if config.MaxQueueSize > 0 { + maxQueueSize = config.MaxQueueSize + } + + 
confHTTPRetryDelay := 10 * time.Second + if config.HTTPRetryDelay > 0 { + confHTTPRetryDelay = config.HTTPRetryDelay + } + + confHTTPRedialPeriod := 10 * time.Second + if config.HTTPRedialPeriod > 0 { + confHTTPRedialPeriod = config.HTTPRedialPeriod + } + + confCccpMaxWait := 3 * time.Second + if config.CccpMaxWait > 0 { + confCccpMaxWait = config.CccpMaxWait + } + + confCccpPollPeriod := 2500 * time.Millisecond + if config.CccpPollPeriod > 0 { + confCccpPollPeriod = config.CccpPollPeriod + } + + if config.CompressionMinSize > 0 { + compressionMinSize = config.CompressionMinSize + } + if config.CompressionMinRatio > 0 { + compressionMinRatio = config.CompressionMinRatio + if compressionMinRatio >= 1.0 { + compressionMinRatio = 1.0 + } + } + if c.defaultRetryStrategy == nil { + c.defaultRetryStrategy = newFailFastRetryStrategy() + } + authMechanisms := []AuthMechanism{ + ScramSha512AuthMechanism, + ScramSha256AuthMechanism, + ScramSha1AuthMechanism} + + // PLAIN authentication is only supported over TLS + if config.UseTLS { + authMechanisms = append(authMechanisms, PlainAuthMechanism) + } + + authHandler := buildAuthHandler(auth) + + var httpEpList []string + for _, hostPort := range config.HTTPAddrs { + if !c.IsSecure() { + httpEpList = append(httpEpList, fmt.Sprintf("http://%s", hostPort)) + } else { + httpEpList = append(httpEpList, fmt.Sprintf("https://%s", hostPort)) + } + } + + if config.UseZombieLogger { + zombieLoggerInterval := 10 * time.Second + zombieLoggerSampleSize := 10 + if config.ZombieLoggerInterval > 0 { + zombieLoggerInterval = config.ZombieLoggerInterval + } + if config.ZombieLoggerSampleSize > 0 { + zombieLoggerSampleSize = config.ZombieLoggerSampleSize + } + + c.zombieLogger = newZombieLoggerComponent(zombieLoggerInterval, zombieLoggerSampleSize) + go c.zombieLogger.Start() + } + + c.cfgManager = newConfigManager( + configManagerProperties{ + NetworkType: config.NetworkType, + UseSSL: config.UseTLS, + SrcMemdAddrs: config.MemdAddrs, + 
SrcHTTPAddrs: httpEpList, + }, + ) + + dialer := newMemdClientDialerComponent( + memdClientDialerProps{ + ServerWaitTimeout: serverWaitTimeout, + KVConnectTimeout: kvConnectTimeout, + ClientID: c.clientID, + TLSConfig: c.tlsConfig, + CompressionMinSize: compressionMinSize, + CompressionMinRatio: compressionMinRatio, + DisableDecompression: disableDecompression, + }, + bootstrapProps{ + HelloProps: helloProps{ + CollectionsEnabled: useCollections, + MutationTokensEnabled: useMutationTokens, + CompressionEnabled: useCompression, + DurationsEnabled: useDurations, + OutOfOrderEnabled: useOutOfOrder, + }, + Bucket: c.bucketName, + UserAgent: userAgent, + AuthMechanisms: authMechanisms, + AuthHandler: authHandler, + ErrMapManager: c.errMap, + }, + circuitBreakerConfig, + c.zombieLogger, + c.tracer, + initFn, + ) + c.kvMux = newKVMux( + kvMuxProps{ + QueueSize: maxQueueSize, + PoolSize: kvPoolSize, + CollectionsEnabled: useCollections, + }, + c.cfgManager, + c.errMap, + c.tracer, + dialer, + ) + c.collections = newCollectionIDManager( + collectionIDProps{ + MaxQueueSize: config.MaxQueueSize, + DefaultRetryStrategy: c.defaultRetryStrategy, + }, + c.kvMux, + c.tracer, + c.cfgManager, + ) + c.httpMux = newHTTPMux(circuitBreakerConfig, c.cfgManager) + c.http = newHTTPComponent( + httpComponentProps{ + UserAgent: userAgent, + DefaultRetryStrategy: c.defaultRetryStrategy, + }, + httpCli, + c.httpMux, + auth, + c.tracer, + ) + + if len(config.MemdAddrs) == 0 && config.BucketName == "" { + // The http poller can't run without a bucket. We don't trigger an error for this case + // because AgentGroup users who use memcached buckets on non-default ports will end up here. 
+ logDebugf("No bucket name specified and only http addresses specified, not running config poller") + } else { + c.pollerController = newPollerController( + newCCCPConfigController( + cccpPollerProperties{ + confCccpMaxWait: confCccpMaxWait, + confCccpPollPeriod: confCccpPollPeriod, + }, + c.kvMux, + c.cfgManager, + ), + newHTTPConfigController( + c.bucketName, + httpPollerProperties{ + httpComponent: c.http, + confHTTPRetryDelay: confHTTPRetryDelay, + confHTTPRedialPeriod: confHTTPRedialPeriod, + }, + c.httpMux, + c.cfgManager, + ), + c.cfgManager, + ) + } + + c.observe = newObserveComponent(c.collections, c.defaultRetryStrategy, c.tracer, c.kvMux) + c.crud = newCRUDComponent(c.collections, c.defaultRetryStrategy, c.tracer, c.errMap, c.kvMux) + c.stats = newStatsComponent(c.kvMux, c.defaultRetryStrategy, c.tracer) + c.n1ql = newN1QLQueryComponent(c.http, c.cfgManager, c.tracer) + c.analytics = newAnalyticsQueryComponent(c.http, c.tracer) + c.search = newSearchQueryComponent(c.http, c.tracer) + c.views = newViewQueryComponent(c.http, c.tracer) + c.diagnostics = newDiagnosticsComponent(c.kvMux, c.httpMux, c.http, c.bucketName, c.defaultRetryStrategy, c.pollerController) + + // Kick everything off. 
+ cfg := &routeConfig{ + kvServerList: config.MemdAddrs, + mgmtEpList: httpEpList, + revID: -1, + } + + c.httpMux.OnNewRouteConfig(cfg) + c.kvMux.OnNewRouteConfig(cfg) + + if c.pollerController != nil { + go c.pollerController.Start() + } + + return c, nil +} + +func createTLSConfig(auth AuthProvider, caProvider func() *x509.CertPool) *dynTLSConfig { + return &dynTLSConfig{ + BaseConfig: &tls.Config{ + GetClientCertificate: func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert, err := auth.Certificate(AuthCertRequest{}) + if err != nil { + return nil, err + } + + if cert == nil { + return &tls.Certificate{}, nil + } + + return cert, nil + }, + }, + Provider: caProvider, + } +} + +func createHTTPClient(maxIdleConns, maxIdleConnsPerHost int, idleTimeout time.Duration, tlsConfig *dynTLSConfig) *http.Client { + httpDialer := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + } + + // We set up the transport to point at the BaseConfig from the dynamic TLS system. + // We also set ForceAttemptHTTP2, which will update the base-config to support HTTP2 + // automatically, so that all configs from it will look for that. 
+ + var httpTLSConfig *dynTLSConfig + var httpBaseTLSConfig *tls.Config + if tlsConfig != nil { + httpTLSConfig = tlsConfig.Clone() + httpBaseTLSConfig = httpTLSConfig.BaseConfig + } + + httpTransport := &http.Transport{ + TLSClientConfig: httpBaseTLSConfig, + ForceAttemptHTTP2: true, + + Dial: func(network, addr string) (net.Conn, error) { + return httpDialer.Dial(network, addr) + }, + DialTLS: func(network, addr string) (net.Conn, error) { + tcpConn, err := httpDialer.Dial(network, addr) + if err != nil { + return nil, err + } + + if httpTLSConfig == nil { + return nil, errors.New("TLS was not configured on this Agent") + } + srvTLSConfig, err := httpTLSConfig.MakeForAddr(addr) + if err != nil { + return nil, err + } + + tlsConn := tls.Client(tcpConn, srvTLSConfig) + return tlsConn, nil + }, + MaxIdleConns: maxIdleConns, + MaxIdleConnsPerHost: maxIdleConnsPerHost, + IdleConnTimeout: idleTimeout, + } + + httpCli := &http.Client{ + Transport: httpTransport, + CheckRedirect: func(req *http.Request, via []*http.Request) error { + // All that we're doing here is setting auth on any redirects. + // For that reason we can just pull it off the oldest (first) request. + if len(via) >= 10 { + // Just duplicate the default behaviour for maximum redirects. 
+ return errors.New("stopped after 10 redirects") + } + + oldest := via[0] + auth := oldest.Header.Get("Authorization") + if auth != "" { + req.Header.Set("Authorization", auth) + } + + return nil + }, + } + return httpCli +} + +func buildAuthHandler(auth AuthProvider) authFuncHandler { + return func(client AuthClient, deadline time.Time, mechanism AuthMechanism) authFunc { + creds, err := getKvAuthCreds(auth, client.Address()) + if err != nil { + return nil + } + + if creds.Username != "" || creds.Password != "" { + return func() (chan BytesAndError, chan bool, error) { + continueCh := make(chan bool, 1) + completedCh := make(chan BytesAndError, 1) + hasContinued := int32(0) + callErr := saslMethod(mechanism, creds.Username, creds.Password, client, deadline, func() { + // hasContinued should never be 1 here but let's guard against it. + if atomic.CompareAndSwapInt32(&hasContinued, 0, 1) { + continueCh <- true + } + }, func(err error) { + if atomic.CompareAndSwapInt32(&hasContinued, 0, 1) { + sendContinue := true + if err != nil { + sendContinue = false + } + continueCh <- sendContinue + } + completedCh <- BytesAndError{Err: err} + }) + if callErr != nil { + return nil, nil, err + } + return completedCh, continueCh, nil + } + } + + return nil + } +} + +// Close shuts down the agent, disconnecting from all servers and failing +// any outstanding operations with ErrShutdown. +func (agent *Agent) Close() error { + routeCloseErr := agent.kvMux.Close() + + poller := agent.pollerController + if poller != nil { + poller.Stop() + } + + if agent.zombieLogger != nil { + agent.zombieLogger.Stop() + } + + if poller != nil { + // Wait for our external looper goroutines to finish, note that if the + // specific looper wasn't used, it will be a nil value otherwise it + // will be an open channel till its closed to signal completion. + pollerCh := poller.Done() + if pollerCh != nil { + <-pollerCh + } + } + + // Close the transports so that they don't hold open goroutines. 
+ agent.http.Close() + + return routeCloseErr +} + +// ClientID returns the unique id for this agent +func (agent *Agent) ClientID() string { + return agent.clientID +} + +// CapiEps returns all the available endpoints for performing +// map-reduce queries. +func (agent *Agent) CapiEps() []string { + return agent.httpMux.CapiEps() +} + +// MgmtEps returns all the available endpoints for performing +// management queries. +func (agent *Agent) MgmtEps() []string { + return agent.httpMux.MgmtEps() +} + +// N1qlEps returns all the available endpoints for performing +// N1QL queries. +func (agent *Agent) N1qlEps() []string { + return agent.httpMux.N1qlEps() +} + +// FtsEps returns all the available endpoints for performing +// FTS queries. +func (agent *Agent) FtsEps() []string { + return agent.httpMux.FtsEps() +} + +// CbasEps returns all the available endpoints for performing +// CBAS queries. +func (agent *Agent) CbasEps() []string { + return agent.httpMux.CbasEps() +} + +// HasCollectionsSupport verifies whether or not collections are available on the agent. +func (agent *Agent) HasCollectionsSupport() bool { + return agent.kvMux.SupportsCollections() +} + +// IsSecure returns whether this client is connected via SSL. +func (agent *Agent) IsSecure() bool { + return agent.tlsConfig != nil +} + +// UsingGCCCP returns whether or not the Agent is currently using GCCCP polling. +func (agent *Agent) UsingGCCCP() bool { + return agent.kvMux.SupportsGCCCP() +} + +// HasSeenConfig returns whether or not the Agent has seen a valid cluster config. This does not mean that the agent +// currently has active connections. +// Volatile: This API is subject to change at any time. +func (agent *Agent) HasSeenConfig() (bool, error) { + seen, err := agent.kvMux.ConfigRev() + if err != nil { + return false, err + } + + return seen > -1, nil +} + +// WaitUntilReady returns whether or not the Agent has seen a valid cluster config. 
+func (agent *Agent) WaitUntilReady(deadline time.Time, opts WaitUntilReadyOptions, cb WaitUntilReadyCallback) (PendingOp, error) { + return agent.diagnostics.WaitUntilReady(deadline, opts, cb) +} + +// ConfigSnapshot returns a snapshot of the underlying configuration currently in use. +func (agent *Agent) ConfigSnapshot() (*ConfigSnapshot, error) { + return agent.kvMux.ConfigSnapshot() +} + +// BucketName returns the name of the bucket that the agent is using, if any. +// Uncommitted: This API may change in the future. +func (agent *Agent) BucketName() string { + return agent.bucketName +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/agent_config.go b/vendor/github.com/couchbase/gocbcore/v9/agent_config.go new file mode 100644 index 000000000000..9803db38aa9b --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/agent_config.go @@ -0,0 +1,365 @@ +package gocbcore + +import ( + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/couchbase/gocbcore/v9/connstr" +) + +func parseDurationOrInt(valStr string) (time.Duration, error) { + dur, err := time.ParseDuration(valStr) + if err != nil { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return 0, err + } + + dur = time.Duration(val) * time.Millisecond + } + + return dur, nil +} + +// AgentConfig specifies the configuration options for creation of an Agent. 
+type AgentConfig struct { + MemdAddrs []string + HTTPAddrs []string + BucketName string + UserAgent string + UseTLS bool + NetworkType string + Auth AuthProvider + + TLSRootCAProvider func() *x509.CertPool + + UseMutationTokens bool + UseCompression bool + UseDurations bool + DisableDecompression bool + UseOutOfOrderResponses bool + + UseCollections bool + + CompressionMinSize int + CompressionMinRatio float64 + + HTTPRedialPeriod time.Duration + HTTPRetryDelay time.Duration + CccpMaxWait time.Duration + CccpPollPeriod time.Duration + + ConnectTimeout time.Duration + KVConnectTimeout time.Duration + + KvPoolSize int + MaxQueueSize int + + HTTPMaxIdleConns int + HTTPMaxIdleConnsPerHost int + HTTPIdleConnectionTimeout time.Duration + + // Volatile: Tracer API is subject to change. + Tracer RequestTracer + NoRootTraceSpans bool + + DefaultRetryStrategy RetryStrategy + CircuitBreakerConfig CircuitBreakerConfig + + UseZombieLogger bool + ZombieLoggerInterval time.Duration + ZombieLoggerSampleSize int +} + +func (config *AgentConfig) redacted() interface{} { + newConfig := AgentConfig{} + newConfig = *config + if isLogRedactionLevelFull() { + // The slices here are still pointing at config's underlying arrays + // so we need to make them not do that. + newConfig.HTTPAddrs = append([]string(nil), newConfig.HTTPAddrs...) + for i, addr := range newConfig.HTTPAddrs { + newConfig.HTTPAddrs[i] = redactSystemData(addr) + } + newConfig.MemdAddrs = append([]string(nil), newConfig.MemdAddrs...) + for i, addr := range newConfig.MemdAddrs { + newConfig.MemdAddrs[i] = redactSystemData(addr) + } + + if newConfig.BucketName != "" { + newConfig.BucketName = redactMetaData(newConfig.BucketName) + } + } + + return newConfig +} + +// FromConnStr populates the AgentConfig with information from a +// Couchbase Connection String. +// Supported options are: +// bootstrap_on (bool) - Specifies what protocol to bootstrap on (cccp, http). 
+// ca_cert_path (string) - Specifies the path to a CA certificate. +// network (string) - The network type to use. +// kv_connect_timeout (duration) - Maximum period to attempt to connect to cluster in ms. +// config_poll_interval (duration) - Period to wait between CCCP config polling in ms. +// config_poll_timeout (duration) - Maximum period of time to wait for a CCCP request. +// compression (bool) - Whether to enable network-wise compression of documents. +// compression_min_size (int) - The minimal size of the document in bytes to consider compression. +// compression_min_ratio (float64) - The minimal compress ratio (compressed / original) for the document to be sent compressed. +// enable_server_durations (bool) - Whether to enable fetching server operation durations. +// max_idle_http_connections (int) - Maximum number of idle http connections in the pool. +// max_perhost_idle_http_connections (int) - Maximum number of idle http connections in the pool per host. +// idle_http_connection_timeout (duration) - Maximum length of time for an idle connection to stay in the pool in ms. +// orphaned_response_logging (bool) - Whether to enable orphaned response logging. +// orphaned_response_logging_interval (duration) - How often to print the orphan log records. +// orphaned_response_logging_sample_size (int) - The maximum number of orphan log records to track. +// dcp_priority (int) - Specifies the priority to request from the Cluster when connecting for DCP. +// enable_dcp_expiry (bool) - Whether to enable the feature to distinguish between explicit delete and expired delete on DCP. +// http_redial_period (duration) - The maximum length of time for the HTTP poller to stay connected before reconnecting. +// http_retry_delay (duration) - The length of time to wait between HTTP poller retries if connecting fails. +// kv_pool_size (int) - The number of connections to create to each kv node. 
+// max_queue_size (int) - The maximum number of requests that can be queued for sending per connection. +func (config *AgentConfig) FromConnStr(connStr string) error { + baseSpec, err := connstr.Parse(connStr) + if err != nil { + return err + } + + spec, err := connstr.Resolve(baseSpec) + if err != nil { + return err + } + + fetchOption := func(name string) (string, bool) { + optValue := spec.Options[name] + if len(optValue) == 0 { + return "", false + } + return optValue[len(optValue)-1], true + } + + // Grab the resolved hostnames into a set of string arrays + var httpHosts []string + for _, specHost := range spec.HttpHosts { + httpHosts = append(httpHosts, fmt.Sprintf("%s:%d", specHost.Host, specHost.Port)) + } + + var memdHosts []string + for _, specHost := range spec.MemdHosts { + memdHosts = append(memdHosts, fmt.Sprintf("%s:%d", specHost.Host, specHost.Port)) + } + + // Get bootstrap_on option to determine which, if any, of the bootstrap nodes should be cleared + switch val, _ := fetchOption("bootstrap_on"); val { + case "http": + memdHosts = nil + if len(httpHosts) == 0 { + return errors.New("bootstrap_on=http but no HTTP hosts in connection string") + } + case "cccp": + httpHosts = nil + if len(memdHosts) == 0 { + return errors.New("bootstrap_on=cccp but no CCCP/Memcached hosts in connection string") + } + case "both": + case "": + // Do nothing + break + default: + return errors.New("bootstrap_on={http,cccp,both}") + } + config.MemdAddrs = memdHosts + config.HTTPAddrs = httpHosts + + if spec.UseSsl { + cacertpaths := spec.Options["ca_cert_path"] + + if len(cacertpaths) > 0 { + roots := x509.NewCertPool() + + for _, path := range cacertpaths { + cacert, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + ok := roots.AppendCertsFromPEM(cacert) + if !ok { + return errInvalidCertificate + } + } + + config.TLSRootCAProvider = func() *x509.CertPool { + return roots + } + } + + config.UseTLS = true + } + + if spec.Bucket != "" { + 
config.BucketName = spec.Bucket + } + + if valStr, ok := fetchOption("network"); ok { + if valStr == "default" { + valStr = "" + } + + config.NetworkType = valStr + } + + if valStr, ok := fetchOption("kv_connect_timeout"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("kv_connect_timeout option must be a duration or a number") + } + config.KVConnectTimeout = val + } + + if valStr, ok := fetchOption("config_poll_timeout"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("config poll timeout option must be a duration or a number") + } + config.CccpMaxWait = val + } + + if valStr, ok := fetchOption("config_poll_interval"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("config pool interval option must be duration or a number") + } + config.CccpPollPeriod = val + } + + if valStr, ok := fetchOption("enable_mutation_tokens"); ok { + val, err := strconv.ParseBool(valStr) + if err != nil { + return fmt.Errorf("enable_mutation_tokens option must be a boolean") + } + config.UseMutationTokens = val + } + + if valStr, ok := fetchOption("compression"); ok { + val, err := strconv.ParseBool(valStr) + if err != nil { + return fmt.Errorf("compression option must be a boolean") + } + config.UseCompression = val + } + + if valStr, ok := fetchOption("compression_min_size"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("compression_min_size option must be an int") + } + config.CompressionMinSize = int(val) + } + + if valStr, ok := fetchOption("compression_min_ratio"); ok { + val, err := strconv.ParseFloat(valStr, 64) + if err != nil { + return fmt.Errorf("compression_min_size option must be an int") + } + config.CompressionMinRatio = val + } + + if valStr, ok := fetchOption("enable_server_durations"); ok { + val, err := strconv.ParseBool(valStr) + if err != nil { + return fmt.Errorf("server_duration option must be a boolean") + } + 
config.UseDurations = val + } + + if valStr, ok := fetchOption("max_idle_http_connections"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("http max idle connections option must be a number") + } + config.HTTPMaxIdleConns = int(val) + } + + if valStr, ok := fetchOption("max_perhost_idle_http_connections"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("max_perhost_idle_http_connections option must be a number") + } + config.HTTPMaxIdleConnsPerHost = int(val) + } + + if valStr, ok := fetchOption("idle_http_connection_timeout"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("idle_http_connection_timeout option must be a duration or a number") + } + config.HTTPIdleConnectionTimeout = val + } + + if valStr, ok := fetchOption("orphaned_response_logging"); ok { + val, err := strconv.ParseBool(valStr) + if err != nil { + return fmt.Errorf("orphaned_response_logging option must be a boolean") + } + config.UseZombieLogger = val + } + + if valStr, ok := fetchOption("orphaned_response_logging_interval"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("orphaned_response_logging_interval option must be a duration or a number") + } + config.ZombieLoggerInterval = val + } + + if valStr, ok := fetchOption("orphaned_response_logging_sample_size"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("orphaned_response_logging_sample_size option must be a number") + } + config.ZombieLoggerSampleSize = int(val) + } + + // This option is experimental + if valStr, ok := fetchOption("http_redial_period"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("http redial period option must be a duration or a number") + } + config.HTTPRedialPeriod = val + } + + // This option is experimental + if valStr, ok := fetchOption("http_retry_delay"); ok { + val, err := 
parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("http retry delay option must be a duration or a number") + } + config.HTTPRetryDelay = val + } + + // This option is experimental + if valStr, ok := fetchOption("kv_pool_size"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("kv pool size option must be a number") + } + config.KvPoolSize = int(val) + } + + // This option is experimental + if valStr, ok := fetchOption("max_queue_size"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("max queue size option must be a number") + } + config.MaxQueueSize = int(val) + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/agent_ops.go b/vendor/github.com/couchbase/gocbcore/v9/agent_ops.go new file mode 100644 index 000000000000..f9dc52c35ff8 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/agent_ops.go @@ -0,0 +1,272 @@ +package gocbcore + +import "github.com/couchbase/gocbcore/v9/memd" + +// GetCallback is invoked upon completion of a Get operation. +type GetCallback func(*GetResult, error) + +// Get retrieves a document. +func (agent *Agent) Get(opts GetOptions, cb GetCallback) (PendingOp, error) { + return agent.crud.Get(opts, cb) +} + +// GetAndTouchCallback is invoked upon completion of a GetAndTouch operation. +type GetAndTouchCallback func(*GetAndTouchResult, error) + +// GetAndTouch retrieves a document and updates its expiry. +func (agent *Agent) GetAndTouch(opts GetAndTouchOptions, cb GetAndTouchCallback) (PendingOp, error) { + return agent.crud.GetAndTouch(opts, cb) +} + +// GetAndLockCallback is invoked upon completion of a GetAndLock operation. +type GetAndLockCallback func(*GetAndLockResult, error) + +// GetAndLock retrieves a document and locks it. 
+func (agent *Agent) GetAndLock(opts GetAndLockOptions, cb GetAndLockCallback) (PendingOp, error) { + return agent.crud.GetAndLock(opts, cb) +} + +// GetReplicaCallback is invoked upon completion of a GetReplica operation. +type GetReplicaCallback func(*GetReplicaResult, error) + +// GetOneReplica retrieves a document from a replica server. +func (agent *Agent) GetOneReplica(opts GetOneReplicaOptions, cb GetReplicaCallback) (PendingOp, error) { + return agent.crud.GetOneReplica(opts, cb) +} + +// TouchCallback is invoked upon completion of a Touch operation. +type TouchCallback func(*TouchResult, error) + +// Touch updates the expiry for a document. +func (agent *Agent) Touch(opts TouchOptions, cb TouchCallback) (PendingOp, error) { + return agent.crud.Touch(opts, cb) +} + +// UnlockCallback is invoked upon completion of a Unlock operation. +type UnlockCallback func(*UnlockResult, error) + +// Unlock unlocks a locked document. +func (agent *Agent) Unlock(opts UnlockOptions, cb UnlockCallback) (PendingOp, error) { + return agent.crud.Unlock(opts, cb) +} + +// DeleteCallback is invoked upon completion of a Delete operation. +type DeleteCallback func(*DeleteResult, error) + +// Delete removes a document. +func (agent *Agent) Delete(opts DeleteOptions, cb DeleteCallback) (PendingOp, error) { + return agent.crud.Delete(opts, cb) +} + +// StoreCallback is invoked upon completion of a Add, Set or Replace operation. +type StoreCallback func(*StoreResult, error) + +// Add stores a document as long as it does not already exist. +func (agent *Agent) Add(opts AddOptions, cb StoreCallback) (PendingOp, error) { + return agent.crud.Add(opts, cb) +} + +// Set stores a document. +func (agent *Agent) Set(opts SetOptions, cb StoreCallback) (PendingOp, error) { + return agent.crud.Set(opts, cb) +} + +// Replace replaces the value of a Couchbase document with another value. 
+func (agent *Agent) Replace(opts ReplaceOptions, cb StoreCallback) (PendingOp, error) { + return agent.crud.Replace(opts, cb) +} + +// AdjoinCallback is invoked upon completion of a Append or Prepend operation. +type AdjoinCallback func(*AdjoinResult, error) + +// Append appends some bytes to a document. +func (agent *Agent) Append(opts AdjoinOptions, cb AdjoinCallback) (PendingOp, error) { + return agent.crud.Append(opts, cb) +} + +// Prepend prepends some bytes to a document. +func (agent *Agent) Prepend(opts AdjoinOptions, cb AdjoinCallback) (PendingOp, error) { + return agent.crud.Prepend(opts, cb) +} + +// CounterCallback is invoked upon completion of a Increment or Decrement operation. +type CounterCallback func(*CounterResult, error) + +// Increment increments the unsigned integer value in a document. +func (agent *Agent) Increment(opts CounterOptions, cb CounterCallback) (PendingOp, error) { + return agent.crud.Increment(opts, cb) +} + +// Decrement decrements the unsigned integer value in a document. +func (agent *Agent) Decrement(opts CounterOptions, cb CounterCallback) (PendingOp, error) { + return agent.crud.Decrement(opts, cb) +} + +// GetRandomCallback is invoked upon completion of a GetRandom operation. +type GetRandomCallback func(*GetRandomResult, error) + +// GetRandom retrieves the key and value of a random document stored within Couchbase Server. +func (agent *Agent) GetRandom(opts GetRandomOptions, cb GetRandomCallback) (PendingOp, error) { + return agent.crud.GetRandom(opts, cb) +} + +// GetMetaCallback is invoked upon completion of a GetMeta operation. +type GetMetaCallback func(*GetMetaResult, error) + +// GetMeta retrieves a document along with some internal Couchbase meta-data. +func (agent *Agent) GetMeta(opts GetMetaOptions, cb GetMetaCallback) (PendingOp, error) { + return agent.crud.GetMeta(opts, cb) +} + +// SetMetaCallback is invoked upon completion of a SetMeta operation. 
+type SetMetaCallback func(*SetMetaResult, error) + +// SetMeta stores a document along with setting some internal Couchbase meta-data. +func (agent *Agent) SetMeta(opts SetMetaOptions, cb SetMetaCallback) (PendingOp, error) { + return agent.crud.SetMeta(opts, cb) +} + +// DeleteMetaCallback is invoked upon completion of a DeleteMeta operation. +type DeleteMetaCallback func(*DeleteMetaResult, error) + +// DeleteMeta deletes a document along with setting some internal Couchbase meta-data. +func (agent *Agent) DeleteMeta(opts DeleteMetaOptions, cb DeleteMetaCallback) (PendingOp, error) { + return agent.crud.DeleteMeta(opts, cb) +} + +// StatsCallback is invoked upon completion of a Stats operation. +type StatsCallback func(*StatsResult, error) + +// Stats retrieves statistics information from the server. Note that as this +// function is an aggregator across numerous servers, there are no guarantees +// about the consistency of the results. Occasionally, some nodes may not be +// represented in the results, or there may be conflicting information between +// multiple nodes (a vbucket active on two separate nodes at once). +func (agent *Agent) Stats(opts StatsOptions, cb StatsCallback) (PendingOp, error) { + return agent.stats.Stats(opts, cb) +} + +// ObserveCallback is invoked upon completion of a Observe operation. +type ObserveCallback func(*ObserveResult, error) + +// Observe retrieves the current CAS and persistence state for a document. +func (agent *Agent) Observe(opts ObserveOptions, cb ObserveCallback) (PendingOp, error) { + return agent.observe.Observe(opts, cb) +} + +// ObserveVbCallback is invoked upon completion of a ObserveVb operation. +type ObserveVbCallback func(*ObserveVbResult, error) + +// ObserveVb retrieves the persistence state sequence numbers for a particular VBucket +// and includes additional details not included by the basic version. 
+func (agent *Agent) ObserveVb(opts ObserveVbOptions, cb ObserveVbCallback) (PendingOp, error) { + return agent.observe.ObserveVb(opts, cb) +} + +// SubDocOp defines a per-operation structure to be passed to MutateIn +// or LookupIn for performing many sub-document operations. +type SubDocOp struct { + Op memd.SubDocOpType + Flags memd.SubdocFlag + Path string + Value []byte +} + +// LookupInCallback is invoked upon completion of a LookupIn operation. +type LookupInCallback func(*LookupInResult, error) + +// LookupIn performs a multiple-lookup sub-document operation on a document. +func (agent *Agent) LookupIn(opts LookupInOptions, cb LookupInCallback) (PendingOp, error) { + return agent.crud.LookupIn(opts, cb) +} + +// MutateInCallback is invoked upon completion of a MutateIn operation. +type MutateInCallback func(*MutateInResult, error) + +// MutateIn performs a multiple-mutation sub-document operation on a document. +func (agent *Agent) MutateIn(opts MutateInOptions, cb MutateInCallback) (PendingOp, error) { + return agent.crud.MutateIn(opts, cb) +} + +// N1QLQueryCallback is invoked upon completion of a N1QLQuery operation. +type N1QLQueryCallback func(*N1QLRowReader, error) + +// N1QLQuery executes a N1QL query +func (agent *Agent) N1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) { + return agent.n1ql.N1QLQuery(opts, cb) +} + +// PreparedN1QLQuery executes a prepared N1QL query +func (agent *Agent) PreparedN1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) { + return agent.n1ql.PreparedN1QLQuery(opts, cb) +} + +// AnalyticsQueryCallback is invoked upon completion of a AnalyticsQuery operation. 
+type AnalyticsQueryCallback func(*AnalyticsRowReader, error) + +// AnalyticsQuery executes an analytics query +func (agent *Agent) AnalyticsQuery(opts AnalyticsQueryOptions, cb AnalyticsQueryCallback) (PendingOp, error) { + return agent.analytics.AnalyticsQuery(opts, cb) +} + +// SearchQueryCallback is invoked upon completion of a SearchQuery operation. +type SearchQueryCallback func(*SearchRowReader, error) + +// SearchQuery executes a Search query +func (agent *Agent) SearchQuery(opts SearchQueryOptions, cb SearchQueryCallback) (PendingOp, error) { + return agent.search.SearchQuery(opts, cb) +} + +// ViewQueryCallback is invoked upon completion of a ViewQuery operation. +type ViewQueryCallback func(*ViewQueryRowReader, error) + +// ViewQuery executes a view query +func (agent *Agent) ViewQuery(opts ViewQueryOptions, cb ViewQueryCallback) (PendingOp, error) { + return agent.views.ViewQuery(opts, cb) +} + +// DoHTTPRequestCallback is invoked upon completion of a DoHTTPRequest operation. +type DoHTTPRequestCallback func(*HTTPResponse, error) + +// DoHTTPRequest will perform an HTTP request against one of the HTTP +// services which are available within the SDK. +func (agent *Agent) DoHTTPRequest(req *HTTPRequest, cb DoHTTPRequestCallback) (PendingOp, error) { + return agent.http.DoHTTPRequest(req, cb) +} + +// GetCollectionManifestCallback is invoked upon completion of a GetCollectionManifest operation. +type GetCollectionManifestCallback func(*GetCollectionManifestResult, error) + +// GetCollectionManifest fetches the current server manifest. This function will not update the client's collection +// id cache. +func (agent *Agent) GetCollectionManifest(opts GetCollectionManifestOptions, cb GetCollectionManifestCallback) (PendingOp, error) { + return agent.collections.GetCollectionManifest(opts, cb) +} + +// GetCollectionIDCallback is invoked upon completion of a GetCollectionID operation. 
+type GetCollectionIDCallback func(*GetCollectionIDResult, error) + +// GetCollectionID fetches the collection id and manifest id that the collection belongs to, given a scope name +// and collection name. This function will also prime the client's collection id cache. +func (agent *Agent) GetCollectionID(scopeName string, collectionName string, opts GetCollectionIDOptions, cb GetCollectionIDCallback) (PendingOp, error) { + return agent.collections.GetCollectionID(scopeName, collectionName, opts, cb) +} + +// PingCallback is invoked upon completion of a PingKv operation. +type PingCallback func(*PingResult, error) + +// Ping pings all of the servers we are connected to and returns +// a report regarding the pings that were performed. +func (agent *Agent) Ping(opts PingOptions, cb PingCallback) (PendingOp, error) { + return agent.diagnostics.Ping(opts, cb) +} + +// Diagnostics returns diagnostics information about the client. +// Mainly containing a list of open connections and their current +// states. +func (agent *Agent) Diagnostics(opts DiagnosticsOptions) (*DiagnosticInfo, error) { + return agent.diagnostics.Diagnostics(opts) +} + +// WaitUntilReadyCallback is invoked upon completion of a WaitUntilReady operation. +type WaitUntilReadyCallback func(*WaitUntilReadyResult, error) diff --git a/vendor/github.com/couchbase/gocbcore/v9/agentgroup.go b/vendor/github.com/couchbase/gocbcore/v9/agentgroup.go new file mode 100644 index 000000000000..ed121a990bb3 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/agentgroup.go @@ -0,0 +1,250 @@ +package gocbcore + +import ( + "errors" + "sync" + "time" +) + +// AgentGroup represents a collection of agents that can be used for performing operations +// against a cluster. It holds an internal special agent type which does not create its own +// memcached connections but registers itself for cluster config updates on all agents that +// are created through it. 
+type AgentGroup struct { + agentsLock sync.Mutex + boundAgents map[string]*Agent + // clusterAgent holds no memcached connections but can be used for cluster level (i.e. http) operations. + // It sets its own internal state by listening to cluster config updates on underlying agents. + clusterAgent *clusterAgent + + config *AgentGroupConfig +} + +// CreateAgentGroup will return a new AgentGroup with a base config of the config provided. +// Volatile: AgentGroup is subject to change or removal. +func CreateAgentGroup(config *AgentGroupConfig) (*AgentGroup, error) { + logInfof("SDK Version: gocbcore/%s", goCbCoreVersionStr) + logInfof("Creating new agent group: %+v", config) + + c := config.toAgentConfig() + agent, err := CreateAgent(c) + if err != nil { + return nil, err + } + + ag := &AgentGroup{ + config: config, + boundAgents: make(map[string]*Agent), + } + + ag.clusterAgent = createClusterAgent(&clusterAgentConfig{ + HTTPAddrs: config.HTTPAddrs, + UserAgent: config.UserAgent, + UseTLS: config.UseTLS, + Auth: config.Auth, + TLSRootCAProvider: config.TLSRootCAProvider, + HTTPMaxIdleConns: config.HTTPMaxIdleConns, + HTTPMaxIdleConnsPerHost: config.HTTPMaxIdleConnsPerHost, + HTTPIdleConnectionTimeout: config.HTTPIdleConnectionTimeout, + Tracer: config.Tracer, + NoRootTraceSpans: config.NoRootTraceSpans, + DefaultRetryStrategy: config.DefaultRetryStrategy, + CircuitBreakerConfig: config.CircuitBreakerConfig, + }) + ag.clusterAgent.RegisterWith(agent.cfgManager) + + ag.boundAgents[config.BucketName] = agent + + return ag, nil +} + +// OpenBucket will attempt to open a new bucket against the cluster. +// If an agent using the specified bucket name already exists then this will not open a new connection. 
+func (ag *AgentGroup) OpenBucket(bucketName string) error { + if bucketName == "" { + return wrapError(errInvalidArgument, "bucket name cannot be empty") + } + + existing := ag.GetAgent(bucketName) + if existing != nil { + return nil + } + + config := ag.config.toAgentConfig() + config.BucketName = bucketName + + agent, err := CreateAgent(config) + if err != nil { + return err + } + + ag.clusterAgent.RegisterWith(agent.cfgManager) + + ag.agentsLock.Lock() + ag.boundAgents[bucketName] = agent + ag.agentsLock.Unlock() + ag.maybeCloseGlobalAgent() + + return nil +} + +// GetAgent will return the agent, if any, corresponding to the bucket name specified. +func (ag *AgentGroup) GetAgent(bucketName string) *Agent { + if bucketName == "" { + // We don't allow access to the global level agent. We close that agent on OpenBucket so we don't want + // to return an agent that we then later close. Doing so would only lead to pain. + return nil + } + + ag.agentsLock.Lock() + existingAgent := ag.boundAgents[bucketName] + ag.agentsLock.Unlock() + if existingAgent != nil { + return existingAgent + } + + return nil +} + +// Close will close all underlying agents. +func (ag *AgentGroup) Close() error { + var firstError error + ag.agentsLock.Lock() + for _, agent := range ag.boundAgents { + ag.clusterAgent.UnregisterWith(agent.cfgManager) + if err := agent.Close(); err != nil && firstError == nil { + firstError = err + } + } + ag.agentsLock.Unlock() + if err := ag.clusterAgent.Close(); err != nil && firstError == nil { + firstError = err + } + + return firstError +} + +// N1QLQuery executes a N1QL query against a random connected agent. +// If no agent is connected then this will block until one is available or the deadline is reached. +func (ag *AgentGroup) N1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) { + return ag.clusterAgent.N1QLQuery(opts, cb) +} + +// PreparedN1QLQuery executes a prepared N1QL query against a random connected agent. 
+// If no agent is connected then this will block until one is available or the deadline is reached. +func (ag *AgentGroup) PreparedN1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) { + return ag.clusterAgent.PreparedN1QLQuery(opts, cb) +} + +// AnalyticsQuery executes an analytics query against a random connected agent. +// If no agent is connected then this will block until one is available or the deadline is reached. +func (ag *AgentGroup) AnalyticsQuery(opts AnalyticsQueryOptions, cb AnalyticsQueryCallback) (PendingOp, error) { + return ag.clusterAgent.AnalyticsQuery(opts, cb) +} + +// SearchQuery executes a Search query against a random connected agent. +// If no agent is connected then this will block until one is available or the deadline is reached. +func (ag *AgentGroup) SearchQuery(opts SearchQueryOptions, cb SearchQueryCallback) (PendingOp, error) { + return ag.clusterAgent.SearchQuery(opts, cb) +} + +// ViewQuery executes a view query against a random connected agent. +// If no agent is connected then this will block until one is available or the deadline is reached. +func (ag *AgentGroup) ViewQuery(opts ViewQueryOptions, cb ViewQueryCallback) (PendingOp, error) { + return ag.clusterAgent.ViewQuery(opts, cb) +} + +// DoHTTPRequest will perform an HTTP request against one of the HTTP +// services which are available within the SDK, using a random connected agent. +// If no agent is connected then this will block until one is available or the deadline is reached. +func (ag *AgentGroup) DoHTTPRequest(req *HTTPRequest, cb DoHTTPRequestCallback) (PendingOp, error) { + return ag.clusterAgent.DoHTTPRequest(req, cb) +} + +// WaitUntilReady returns whether or not the AgentGroup can ping the requested services. 
+func (ag *AgentGroup) WaitUntilReady(deadline time.Time, opts WaitUntilReadyOptions, + cb WaitUntilReadyCallback) (PendingOp, error) { + return ag.clusterAgent.WaitUntilReady(deadline, opts, cb) +} + +// Ping pings all of the servers we are connected to and returns +// a report regarding the pings that were performed. +func (ag *AgentGroup) Ping(opts PingOptions, cb PingCallback) (PendingOp, error) { + return ag.clusterAgent.Ping(opts, cb) +} + +// Diagnostics returns diagnostics information about the client. +// Mainly containing a list of open connections and their current +// states. +func (ag *AgentGroup) Diagnostics(opts DiagnosticsOptions) (*DiagnosticInfo, error) { + var agents []*Agent + ag.agentsLock.Lock() + // There's no point in trying to get diagnostics from clusterAgent as it has no kv connections. + // In fact it doesn't even expose a Diagnostics function. + for _, agent := range ag.boundAgents { + agents = append(agents, agent) + } + ag.agentsLock.Unlock() + + if len(agents) == 0 { + return nil, errors.New("no agents available") + } + + var firstError error + var diags []*DiagnosticInfo + for _, agent := range agents { + report, err := agent.diagnostics.Diagnostics(opts) + if err != nil && firstError == nil { + firstError = err + continue + } + + diags = append(diags, report) + } + + if len(diags) == 0 { + return nil, firstError + } + + var overallReport DiagnosticInfo + var connected int + var expected int + for _, report := range diags { + expected++ + overallReport.MemdConns = append(overallReport.MemdConns, report.MemdConns...) 
+ if report.State == ClusterStateOnline { + connected++ + } + if report.ConfigRev > overallReport.ConfigRev { + overallReport.ConfigRev = report.ConfigRev + } + } + + if connected == expected { + overallReport.State = ClusterStateOnline + } else if connected > 0 { + overallReport.State = ClusterStateDegraded + } else { + overallReport.State = ClusterStateOffline + } + + return &overallReport, nil +} + +func (ag *AgentGroup) maybeCloseGlobalAgent() { + ag.agentsLock.Lock() + // Close and delete the global level agent that we created on Connect. + agent := ag.boundAgents[""] + if agent == nil { + ag.agentsLock.Unlock() + return + } + logDebugf("Shutting down global level agent") + delete(ag.boundAgents, "") + ag.agentsLock.Unlock() + + ag.clusterAgent.UnregisterWith(agent.cfgManager) + if err := agent.Close(); err != nil { + logDebugf("Failed to close agent: %s", err) + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/agentgroup_config.go b/vendor/github.com/couchbase/gocbcore/v9/agentgroup_config.go new file mode 100644 index 000000000000..299d2d6c6e54 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/agentgroup_config.go @@ -0,0 +1,55 @@ +package gocbcore + +// AgentGroupConfig specifies the configuration options for creation of an AgentGroup. +type AgentGroupConfig struct { + AgentConfig +} + +func (config *AgentGroupConfig) redacted() interface{} { + return config.AgentConfig.redacted() +} + +// FromConnStr populates the AgentGroupConfig with information from a +// Couchbase Connection String. See AgentConfig for supported options. 
+func (config *AgentGroupConfig) FromConnStr(connStr string) error { + return config.AgentConfig.FromConnStr(connStr) +} + +func (config *AgentGroupConfig) toAgentConfig() *AgentConfig { + return &AgentConfig{ + MemdAddrs: config.MemdAddrs, + HTTPAddrs: config.HTTPAddrs, + BucketName: config.BucketName, + UserAgent: config.UserAgent, + UseTLS: config.UseTLS, + NetworkType: config.NetworkType, + Auth: config.Auth, + TLSRootCAProvider: config.TLSRootCAProvider, + UseMutationTokens: config.UseMutationTokens, + UseCompression: config.UseCompression, + UseDurations: config.UseDurations, + DisableDecompression: config.DisableDecompression, + UseOutOfOrderResponses: config.UseOutOfOrderResponses, + UseCollections: config.UseCollections, + CompressionMinSize: config.CompressionMinSize, + CompressionMinRatio: config.CompressionMinRatio, + HTTPRedialPeriod: config.HTTPRedialPeriod, + HTTPRetryDelay: config.HTTPRetryDelay, + CccpMaxWait: config.CccpMaxWait, + CccpPollPeriod: config.CccpPollPeriod, + ConnectTimeout: config.ConnectTimeout, + KVConnectTimeout: config.KVConnectTimeout, + KvPoolSize: config.KvPoolSize, + MaxQueueSize: config.MaxQueueSize, + HTTPMaxIdleConns: config.HTTPMaxIdleConns, + HTTPMaxIdleConnsPerHost: config.HTTPMaxIdleConnsPerHost, + HTTPIdleConnectionTimeout: config.HTTPIdleConnectionTimeout, + Tracer: config.Tracer, + NoRootTraceSpans: config.NoRootTraceSpans, + DefaultRetryStrategy: config.DefaultRetryStrategy, + CircuitBreakerConfig: config.CircuitBreakerConfig, + UseZombieLogger: config.UseZombieLogger, + ZombieLoggerInterval: config.ZombieLoggerInterval, + ZombieLoggerSampleSize: config.ZombieLoggerSampleSize, + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/analyticscomponent.go b/vendor/github.com/couchbase/gocbcore/v9/analyticscomponent.go new file mode 100644 index 000000000000..e6ac6cf03555 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/analyticscomponent.go @@ -0,0 +1,285 @@ +package gocbcore + +import ( + "context" + 
"encoding/json" + "errors" + "fmt" + "io/ioutil" + "time" +) + +// AnalyticsRowReader providers access to the rows of a analytics query +type AnalyticsRowReader struct { + streamer *queryStreamer +} + +// NextRow reads the next rows bytes from the stream +func (q *AnalyticsRowReader) NextRow() []byte { + return q.streamer.NextRow() +} + +// Err returns any errors that occurred during streaming. +func (q AnalyticsRowReader) Err() error { + return q.streamer.Err() +} + +// MetaData fetches the non-row bytes streamed in the response. +func (q *AnalyticsRowReader) MetaData() ([]byte, error) { + return q.streamer.MetaData() +} + +// Close immediately shuts down the connection +func (q *AnalyticsRowReader) Close() error { + return q.streamer.Close() +} + +// AnalyticsQueryOptions represents the various options available for an analytics query. +type AnalyticsQueryOptions struct { + Payload []byte + Priority int + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. 
+ TraceContext RequestSpanContext +} + +func wrapAnalyticsError(req *httpRequest, statement string, err error) *AnalyticsError { + if err == nil { + err = errors.New("analytics error") + } + + ierr := &AnalyticsError{ + InnerError: err, + } + + if req != nil { + ierr.Endpoint = req.Endpoint + ierr.ClientContextID = req.UniqueID + ierr.RetryAttempts = req.RetryAttempts() + ierr.RetryReasons = req.RetryReasons() + } + + ierr.Statement = statement + + return ierr +} + +type jsonAnalyticsError struct { + Code uint32 `json:"code"` + Msg string `json:"msg"` +} + +type jsonAnalyticsErrorResponse struct { + Errors []jsonAnalyticsError +} + +func parseAnalyticsError(req *httpRequest, statement string, resp *HTTPResponse) *AnalyticsError { + var err error + var errorDescs []AnalyticsErrorDesc + + respBody, readErr := ioutil.ReadAll(resp.Body) + if readErr == nil { + var respParse jsonAnalyticsErrorResponse + parseErr := json.Unmarshal(respBody, &respParse) + if parseErr == nil { + + for _, jsonErr := range respParse.Errors { + errorDescs = append(errorDescs, AnalyticsErrorDesc{ + Code: jsonErr.Code, + Message: jsonErr.Msg, + }) + } + } + } + + if len(errorDescs) >= 1 { + firstErr := errorDescs[0] + errCode := firstErr.Code + errCodeGroup := errCode / 1000 + + if errCodeGroup == 25 { + err = errInternalServerFailure + } + if errCodeGroup == 20 { + err = errAuthenticationFailure + } + if errCodeGroup == 24 { + err = errCompilationFailure + } + if errCode == 23000 || errCode == 23003 { + err = errTemporaryFailure + } + if errCode == 24000 { + err = errParsingFailure + } + if errCode == 24047 { + err = errIndexNotFound + } + if errCode == 24048 { + err = errIndexExists + } + + if errCode == 23007 { + err = errJobQueueFull + } + if errCode == 24025 || errCode == 24044 || errCode == 24045 { + err = errDatasetNotFound + } + if errCode == 24034 { + err = errDataverseNotFound + } + if errCode == 24040 { + err = errDatasetExists + } + if errCode == 24039 { + err = errDataverseExists + 
} + if errCode == 24006 { + err = errLinkNotFound + } + } + + errOut := wrapAnalyticsError(req, statement, err) + errOut.Errors = errorDescs + return errOut +} + +type analyticsQueryComponent struct { + httpComponent *httpComponent + tracer *tracerComponent +} + +func newAnalyticsQueryComponent(httpComponent *httpComponent, tracer *tracerComponent) *analyticsQueryComponent { + return &analyticsQueryComponent{ + httpComponent: httpComponent, + tracer: tracer, + } +} + +// AnalyticsQuery executes an analytics query +func (aqc *analyticsQueryComponent) AnalyticsQuery(opts AnalyticsQueryOptions, cb AnalyticsQueryCallback) (PendingOp, error) { + tracer := aqc.tracer.CreateOpTrace("AnalyticsQuery", opts.TraceContext) + defer tracer.Finish() + + var payloadMap map[string]interface{} + err := json.Unmarshal(opts.Payload, &payloadMap) + if err != nil { + return nil, wrapAnalyticsError(nil, "", wrapError(err, "expected a JSON payload")) + } + + statement := getMapValueString(payloadMap, "statement", "") + clientContextID := getMapValueString(payloadMap, "client_context_id", "") + readOnly := getMapValueBool(payloadMap, "readonly", false) + + ctx, cancel := context.WithCancel(context.Background()) + ireq := &httpRequest{ + Service: CbasService, + Method: "POST", + Path: "/query/service", + Headers: map[string]string{ + "Analytics-Priority": fmt.Sprintf("%d", opts.Priority), + }, + Body: opts.Payload, + IsIdempotent: readOnly, + UniqueID: clientContextID, + Deadline: opts.Deadline, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + start := time.Now() + + go func() { + ExecuteLoop: + for { + { // Produce an updated payload with the appropriate timeout + timeoutLeft := time.Until(ireq.Deadline) + payloadMap["timeout"] = timeoutLeft.String() + + newPayload, err := json.Marshal(payloadMap) + if err != nil { + cancel() + cb(nil, wrapAnalyticsError(nil, "", wrapError(err, "failed to produce payload"))) + return 
+ } + ireq.Body = newPayload + } + + resp, err := aqc.httpComponent.DoInternalHTTPRequest(ireq, false) + if err != nil { + cancel() + // execHTTPRequest will handle retrying due to in-flight socket close based + // on whether or not IsIdempotent is set on the httpRequest + cb(nil, wrapAnalyticsError(ireq, statement, err)) + return + } + + if resp.StatusCode != 200 { + analyticsErr := parseAnalyticsError(ireq, statement, resp) + + var retryReason RetryReason + if len(analyticsErr.Errors) >= 1 { + firstErrDesc := analyticsErr.Errors[0] + + if firstErrDesc.Code == 23000 { + retryReason = AnalyticsTemporaryFailureRetryReason + } else if firstErrDesc.Code == 23003 { + retryReason = AnalyticsTemporaryFailureRetryReason + } else if firstErrDesc.Code == 23007 { + retryReason = AnalyticsTemporaryFailureRetryReason + } + } + + if retryReason == nil { + cancel() + // analyticsErr is already wrapped here + cb(nil, analyticsErr) + return + } + + shouldRetry, retryTime := retryOrchMaybeRetry(ireq, retryReason) + if !shouldRetry { + cancel() + // analyticsErr is already wrapped here + cb(nil, analyticsErr) + return + } + + select { + case <-time.After(time.Until(retryTime)): + continue ExecuteLoop + case <-time.After(time.Until(ireq.Deadline)): + cancel() + err := &TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "http", + Opaque: ireq.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: ireq.retryReasons, + RetryAttempts: ireq.retryCount, + LastDispatchedTo: ireq.Endpoint, + } + cb(nil, wrapAnalyticsError(ireq, statement, err)) + return + } + } + + streamer, err := newQueryStreamer(resp.Body, "results") + if err != nil { + cancel() + cb(nil, wrapAnalyticsError(ireq, statement, err)) + return + } + + cb(&AnalyticsRowReader{ + streamer: streamer, + }, nil) + return + } + }() + + return ireq, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/auth.go b/vendor/github.com/couchbase/gocbcore/v9/auth.go new file mode 100644 index 
000000000000..e2e171d11ee1 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/auth.go @@ -0,0 +1,80 @@ +package gocbcore + +import "crypto/tls" + +// UserPassPair represents a username and password pair. +type UserPassPair struct { + Username string + Password string +} + +// AuthCredsRequest represents an authentication details request from the agent. +type AuthCredsRequest struct { + Service ServiceType + Endpoint string +} + +// AuthCertRequest represents a certificate details request from the agent. +type AuthCertRequest struct { + Service ServiceType + Endpoint string +} + +// AuthProvider is an interface to allow the agent to fetch authentication +// credentials on-demand from the application. +type AuthProvider interface { + SupportsTLS() bool + SupportsNonTLS() bool + Certificate(req AuthCertRequest) (*tls.Certificate, error) + Credentials(req AuthCredsRequest) ([]UserPassPair, error) +} + +func getSingleAuthCreds(auth AuthProvider, req AuthCredsRequest) (UserPassPair, error) { + creds, err := auth.Credentials(req) + if err != nil { + return UserPassPair{}, err + } + + if len(creds) != 1 { + return UserPassPair{}, errInvalidCredentials + } + + return creds[0], nil +} + +func getKvAuthCreds(auth AuthProvider, endpoint string) (UserPassPair, error) { + return getSingleAuthCreds(auth, AuthCredsRequest{ + Service: MemdService, + Endpoint: endpoint, + }) +} + +// PasswordAuthProvider provides a standard AuthProvider implementation +// for use with a standard username/password pair (for example, RBAC). +type PasswordAuthProvider struct { + Username string + Password string +} + +// SupportsNonTLS specifies whether this authenticator supports non-TLS connections. +func (auth PasswordAuthProvider) SupportsNonTLS() bool { + return true +} + +// SupportsTLS specifies whether this authenticator supports TLS connections. 
+func (auth PasswordAuthProvider) SupportsTLS() bool { + return true +} + +// Certificate directly returns a certificate chain to present for the connection. +func (auth PasswordAuthProvider) Certificate(req AuthCertRequest) (*tls.Certificate, error) { + return nil, nil +} + +// Credentials directly returns the username/password from the provider. +func (auth PasswordAuthProvider) Credentials(req AuthCredsRequest) ([]UserPassPair, error) { + return []UserPassPair{{ + Username: auth.Username, + Password: auth.Password, + }}, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/authclient.go b/vendor/github.com/couchbase/gocbcore/v9/authclient.go new file mode 100644 index 000000000000..b864c4c3a202 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/authclient.go @@ -0,0 +1,142 @@ +package gocbcore + +import ( + "crypto/sha1" // nolint: gosec + "crypto/sha256" + "crypto/sha512" + "hash" + "time" + + "github.com/couchbase/gocbcore/v9/memd" + + scram "github.com/couchbase/gocbcore/v9/scram" +) + +// AuthMechanism represents a type of auth that can be performed. +type AuthMechanism string + +const ( + // PlainAuthMechanism represents that PLAIN auth should be performed. + PlainAuthMechanism = AuthMechanism("PLAIN") + + // ScramSha1AuthMechanism represents that SCRAM SHA1 auth should be performed. + ScramSha1AuthMechanism = AuthMechanism("SCRAM_SHA1") + + // ScramSha256AuthMechanism represents that SCRAM SHA256 auth should be performed. + ScramSha256AuthMechanism = AuthMechanism("SCRAM_SHA256") + + // ScramSha512AuthMechanism represents that SCRAM SHA512 auth should be performed. + ScramSha512AuthMechanism = AuthMechanism("SCRAM_SHA512") +) + +// AuthClient exposes an interface for performing authentication on a +// connected Couchbase K/V client. 
+type AuthClient interface { + Address() string + SupportsFeature(feature memd.HelloFeature) bool + + SaslListMechs(deadline time.Time, cb func(mechs []AuthMechanism, err error)) error + SaslAuth(k, v []byte, deadline time.Time, cb func(b []byte, err error)) error + SaslStep(k, v []byte, deadline time.Time, cb func(err error)) error +} + +// SaslListMechsCompleted is used to contain the result and/or error from a SaslListMechs operation. +type SaslListMechsCompleted struct { + Err error + Mechs []AuthMechanism +} + +// SaslAuthPlain performs PLAIN SASL authentication against an AuthClient. +func SaslAuthPlain(username, password string, client AuthClient, deadline time.Time, cb func(err error)) error { + // Build PLAIN auth data + userBuf := []byte(username) + passBuf := []byte(password) + authData := make([]byte, 1+len(userBuf)+1+len(passBuf)) + authData[0] = 0 + copy(authData[1:], userBuf) + authData[1+len(userBuf)] = 0 + copy(authData[1+len(userBuf)+1:], passBuf) + + // Execute PLAIN authentication + err := client.SaslAuth([]byte(PlainAuthMechanism), authData, deadline, func(b []byte, err error) { + if err != nil { + cb(err) + return + } + cb(nil) + }) + if err != nil { + return err + } + + return nil +} + +func saslAuthScram(saslName []byte, newHash func() hash.Hash, username, password string, client AuthClient, + deadline time.Time, continueCb func(), completedCb func(err error)) error { + scramMgr := scram.NewClient(newHash, username, password) + + // Perform the initial SASL step + scramMgr.Step(nil) + err := client.SaslAuth(saslName, scramMgr.Out(), deadline, func(b []byte, err error) { + if err != nil && !isErrorStatus(err, memd.StatusAuthContinue) { + completedCb(err) + return + } + + if !scramMgr.Step(b) { + err = scramMgr.Err() + if err != nil { + completedCb(err) + return + } + + logErrorf("Local auth client finished before server accepted auth") + completedCb(nil) + return + } + + err = client.SaslStep(saslName, scramMgr.Out(), deadline, completedCb) + 
if err != nil { + completedCb(err) + return + } + + continueCb() + }) + if err != nil { + return err + } + + return nil +} + +// SaslAuthScramSha1 performs SCRAM-SHA1 SASL authentication against an AuthClient. +func SaslAuthScramSha1(username, password string, client AuthClient, deadline time.Time, continueCb func(), completedCb func(err error)) error { + return saslAuthScram([]byte("SCRAM-SHA1"), sha1.New, username, password, client, deadline, continueCb, completedCb) +} + +// SaslAuthScramSha256 performs SCRAM-SHA256 SASL authentication against an AuthClient. +func SaslAuthScramSha256(username, password string, client AuthClient, deadline time.Time, continueCb func(), completedCb func(err error)) error { + return saslAuthScram([]byte("SCRAM-SHA256"), sha256.New, username, password, client, deadline, continueCb, completedCb) +} + +// SaslAuthScramSha512 performs SCRAM-SHA512 SASL authentication against an AuthClient. +func SaslAuthScramSha512(username, password string, client AuthClient, deadline time.Time, continueCb func(), completedCb func(err error)) error { + return saslAuthScram([]byte("SCRAM-SHA512"), sha512.New, username, password, client, deadline, continueCb, completedCb) +} + +func saslMethod(method AuthMechanism, username, password string, client AuthClient, deadline time.Time, continueCb func(), completedCb func(err error)) error { + switch method { + case PlainAuthMechanism: + return SaslAuthPlain(username, password, client, deadline, completedCb) + case ScramSha1AuthMechanism: + return SaslAuthScramSha1(username, password, client, deadline, continueCb, completedCb) + case ScramSha256AuthMechanism: + return SaslAuthScramSha256(username, password, client, deadline, continueCb, completedCb) + case ScramSha512AuthMechanism: + return SaslAuthScramSha512(username, password, client, deadline, continueCb, completedCb) + default: + return errNoSupportedMechanisms + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/cbcrc.go 
b/vendor/github.com/couchbase/gocbcore/v9/cbcrc.go new file mode 100644 index 000000000000..84fdbbc50e81 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/cbcrc.go @@ -0,0 +1,75 @@ +package gocbcore + +var crc32tab = []uint32{ + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, + 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, + 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, + 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, + 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, + 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, + 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, + 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, + 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, + 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, + 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, + 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, + 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, + 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, + 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, + 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, + 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, + 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d} + +func cbCrc(key []byte) uint32 { + crc := uint32(0xffffffff) + for x := 0; x < len(key); x++ { + crc = (crc >> 8) ^ crc32tab[(uint64(crc)^uint64(key[x]))&0xff] + } + return (^crc) >> 16 +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/cccpcfgcontroller.go b/vendor/github.com/couchbase/gocbcore/v9/cccpcfgcontroller.go new file mode 100644 index 000000000000..5acf520b2e4a --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/cccpcfgcontroller.go @@ -0,0 +1,214 @@ +package gocbcore + +import ( + "math/rand" + "sync" + "time" + + 
"github.com/couchbase/gocbcore/v9/memd" +) + +type cccpConfigController struct { + muxer *kvMux + cfgMgr *configManagementComponent + confCccpPollPeriod time.Duration + confCccpMaxWait time.Duration + + // Used exclusively for testing to overcome GOCBC-780. It allows a test to pause the cccp looper preventing + // unwanted requests from being sent to the mock once it has been setup for error map testing. + looperPauseSig chan bool + + looperStopSig chan struct{} + looperDoneSig chan struct{} + + fetchErr error + errLock sync.Mutex +} + +func newCCCPConfigController(props cccpPollerProperties, muxer *kvMux, cfgMgr *configManagementComponent) *cccpConfigController { + return &cccpConfigController{ + muxer: muxer, + cfgMgr: cfgMgr, + confCccpPollPeriod: props.confCccpPollPeriod, + confCccpMaxWait: props.confCccpMaxWait, + + looperPauseSig: make(chan bool), + looperStopSig: make(chan struct{}), + looperDoneSig: make(chan struct{}), + } +} + +type cccpPollerProperties struct { + confCccpPollPeriod time.Duration + confCccpMaxWait time.Duration +} + +func (ccc *cccpConfigController) Error() error { + ccc.errLock.Lock() + defer ccc.errLock.Unlock() + return ccc.fetchErr +} + +func (ccc *cccpConfigController) setError(err error) { + ccc.errLock.Lock() + ccc.fetchErr = err + ccc.errLock.Unlock() +} + +func (ccc *cccpConfigController) Pause(paused bool) { + ccc.looperPauseSig <- paused +} + +func (ccc *cccpConfigController) Stop() { + close(ccc.looperStopSig) +} + +func (ccc *cccpConfigController) Done() chan struct{} { + return ccc.looperDoneSig +} + +func (ccc *cccpConfigController) Reset() { + ccc.looperStopSig = make(chan struct{}) + ccc.looperDoneSig = make(chan struct{}) +} + +func (ccc *cccpConfigController) DoLoop() error { + tickTime := ccc.confCccpPollPeriod + paused := false + + logDebugf("CCCP Looper starting.") + nodeIdx := -1 + // The first time that we loop we want to skip any sleep so that we can try get a config and bootstrapped ASAP. 
+ firstLoop := true + +Looper: + for { + if !firstLoop { + // Wait for either the agent to be shut down, or our tick time to expire + select { + case <-ccc.looperStopSig: + break Looper + case pause := <-ccc.looperPauseSig: + paused = pause + case <-time.After(tickTime): + } + } + firstLoop = false + + if paused { + continue + } + + iter, err := ccc.muxer.PipelineSnapshot() + if err != nil { + // If we have an error it indicates the client is shut down. + break + } + + numNodes := iter.NumPipelines() + if numNodes == 0 { + logDebugf("CCCPPOLL: No nodes available to poll, return upstream") + return errNoCCCPHosts + } + + if nodeIdx < 0 || nodeIdx > numNodes { + nodeIdx = rand.Intn(numNodes) + } + + var foundConfig *cfgBucket + var foundErr error + iter.Iterate(nodeIdx, func(pipeline *memdPipeline) bool { + nodeIdx = (nodeIdx + 1) % numNodes + cccpBytes, err := ccc.getClusterConfig(pipeline) + if err != nil { + logDebugf("CCCPPOLL: Failed to retrieve CCCP config. %v", err) + if isPollingFallbackError(err) { + // This error is indicative of a memcached bucket which we can't handle so return the error. + logDebugf("CCCPPOLL: CCCP not supported, returning error upstream.") + foundErr = err + return true + } + + ccc.setError(err) + return false + } + ccc.setError(nil) + + logDebugf("CCCPPOLL: Got Block: %v", string(cccpBytes)) + + hostName, err := hostFromHostPort(pipeline.Address()) + if err != nil { + logErrorf("CCCPPOLL: Failed to parse source address. %v", err) + return false + } + + bk, err := parseConfig(cccpBytes, hostName) + if err != nil { + logDebugf("CCCPPOLL: Failed to parse CCCP config. 
%v", err) + return false + } + + foundConfig = bk + return true + }) + if foundErr != nil { + return foundErr + } + + if foundConfig == nil { + logDebugf("CCCPPOLL: Failed to retrieve config from any node.") + continue + } + + logDebugf("CCCPPOLL: Received new config") + ccc.cfgMgr.OnNewConfig(foundConfig) + } + + close(ccc.looperDoneSig) + return nil +} + +func (ccc *cccpConfigController) getClusterConfig(pipeline *memdPipeline) (cfgOut []byte, errOut error) { + signal := make(chan struct{}, 1) + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdGetClusterConfig, + }, + Callback: func(resp *memdQResponse, _ *memdQRequest, err error) { + if resp != nil { + cfgOut = resp.Packet.Value + } + errOut = err + signal <- struct{}{} + }, + RetryStrategy: newFailFastRetryStrategy(), + } + err := pipeline.SendRequest(req) + if err != nil { + return nil, err + } + + timeoutTmr := AcquireTimer(ccc.confCccpMaxWait) + select { + case <-signal: + ReleaseTimer(timeoutTmr, false) + return + case <-timeoutTmr.C: + ReleaseTimer(timeoutTmr, true) + + // We've timed out so lets check underlying connections to see if they're responsible. 
+ clients := pipeline.Clients() + for _, cli := range clients { + err := cli.Error() + if err != nil { + req.cancelWithCallback(err) + <-signal + return + } + } + req.cancelWithCallback(errAmbiguousTimeout) + <-signal + return + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/circuitbreaker.go b/vendor/github.com/couchbase/gocbcore/v9/circuitbreaker.go new file mode 100644 index 000000000000..8fafd4eddb3b --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/circuitbreaker.go @@ -0,0 +1,208 @@ +package gocbcore + +import ( + "errors" + "sync/atomic" + "time" +) + +const ( + circuitBreakerStateDisabled uint32 = iota + circuitBreakerStateClosed + circuitBreakerStateHalfOpen + circuitBreakerStateOpen +) + +type circuitBreaker interface { + AllowsRequest() bool + MarkSuccessful() + MarkFailure() + State() uint32 + Reset() + CanaryTimeout() time.Duration + CompletionCallback(error) bool +} + +// CircuitBreakerCallback is the callback used by the circuit breaker to determine if an error should count toward +// the circuit breaker failure count. +type CircuitBreakerCallback func(error) bool + +// CircuitBreakerConfig is the set of configuration settings for configuring circuit breakers. +// If Disabled is set to true then a noop circuit breaker will be used, otherwise a lazy circuit +// breaker. 
+type CircuitBreakerConfig struct { + Enabled bool + VolumeThreshold int64 + ErrorThresholdPercentage float64 + SleepWindow time.Duration + RollingWindow time.Duration + CompletionCallback CircuitBreakerCallback + CanaryTimeout time.Duration +} + +type noopCircuitBreaker struct { +} + +func newNoopCircuitBreaker() *noopCircuitBreaker { + return &noopCircuitBreaker{} +} + +func (ncb *noopCircuitBreaker) AllowsRequest() bool { + return true +} + +func (ncb *noopCircuitBreaker) MarkSuccessful() { +} + +func (ncb *noopCircuitBreaker) MarkFailure() { +} + +func (ncb *noopCircuitBreaker) State() uint32 { + return circuitBreakerStateDisabled +} + +func (ncb *noopCircuitBreaker) Reset() { +} + +func (ncb *noopCircuitBreaker) CompletionCallback(error) bool { + return true +} + +func (ncb *noopCircuitBreaker) CanaryTimeout() time.Duration { + return 0 +} + +type lazyCircuitBreaker struct { + state uint32 + windowStart int64 + sleepWindow int64 + rollingWindow int64 + volumeThreshold int64 + errorPercentageThreshold float64 + canaryTimeout time.Duration + total int64 + failed int64 + openedAt int64 + sendCanaryFn func() + completionCallback CircuitBreakerCallback +} + +func newLazyCircuitBreaker(config CircuitBreakerConfig, canaryFn func()) *lazyCircuitBreaker { + if config.VolumeThreshold == 0 { + config.VolumeThreshold = 20 + } + if config.ErrorThresholdPercentage == 0 { + config.ErrorThresholdPercentage = 50 + } + if config.SleepWindow == 0 { + config.SleepWindow = 5 * time.Second + } + if config.RollingWindow == 0 { + config.RollingWindow = 1 * time.Minute + } + if config.CanaryTimeout == 0 { + config.CanaryTimeout = 5 * time.Second + } + if config.CompletionCallback == nil { + config.CompletionCallback = func(err error) bool { + return !errors.Is(err, ErrTimeout) + } + } + + breaker := &lazyCircuitBreaker{ + sleepWindow: int64(config.SleepWindow * time.Nanosecond), + rollingWindow: int64(config.RollingWindow * time.Nanosecond), + volumeThreshold: config.VolumeThreshold, 
+ errorPercentageThreshold: config.ErrorThresholdPercentage, + canaryTimeout: config.CanaryTimeout, + sendCanaryFn: canaryFn, + completionCallback: config.CompletionCallback, + } + breaker.Reset() + + return breaker +} + +func (lcb *lazyCircuitBreaker) Reset() { + now := time.Now().UnixNano() + atomic.StoreUint32(&lcb.state, circuitBreakerStateClosed) + atomic.StoreInt64(&lcb.total, 0) + atomic.StoreInt64(&lcb.failed, 0) + atomic.StoreInt64(&lcb.openedAt, 0) + atomic.StoreInt64(&lcb.windowStart, now) +} + +func (lcb *lazyCircuitBreaker) State() uint32 { + return atomic.LoadUint32(&lcb.state) +} + +func (lcb *lazyCircuitBreaker) AllowsRequest() bool { + state := lcb.State() + if state == circuitBreakerStateClosed { + return true + } + + elapsed := (time.Now().UnixNano() - atomic.LoadInt64(&lcb.openedAt)) > lcb.sleepWindow + if elapsed && atomic.CompareAndSwapUint32(&lcb.state, circuitBreakerStateOpen, circuitBreakerStateHalfOpen) { + // If we're outside of the sleep window and the circuit is open then send a canary. 
+ go lcb.sendCanaryFn() + } + return false +} + +func (lcb *lazyCircuitBreaker) MarkSuccessful() { + if atomic.CompareAndSwapUint32(&lcb.state, circuitBreakerStateHalfOpen, circuitBreakerStateClosed) { + logDebugf("Moving circuit breaker to closed") + lcb.Reset() + return + } + + lcb.maybeResetRollingWindow() + atomic.AddInt64(&lcb.total, 1) +} + +func (lcb *lazyCircuitBreaker) MarkFailure() { + now := time.Now().UnixNano() + if atomic.CompareAndSwapUint32(&lcb.state, circuitBreakerStateHalfOpen, circuitBreakerStateOpen) { + logDebugf("Moving circuit breaker from half open to open") + atomic.StoreInt64(&lcb.openedAt, now) + return + } + + lcb.maybeResetRollingWindow() + atomic.AddInt64(&lcb.total, 1) + atomic.AddInt64(&lcb.failed, 1) + lcb.maybeOpenCircuit() +} + +func (lcb *lazyCircuitBreaker) CanaryTimeout() time.Duration { + return lcb.canaryTimeout +} + +func (lcb *lazyCircuitBreaker) CompletionCallback(err error) bool { + return lcb.completionCallback(err) +} + +func (lcb *lazyCircuitBreaker) maybeOpenCircuit() { + if atomic.LoadInt64(&lcb.total) < lcb.volumeThreshold { + return + } + + currentPercentage := (float64(atomic.LoadInt64(&lcb.failed)) / float64(atomic.LoadInt64(&lcb.total))) * 100 + if currentPercentage >= lcb.errorPercentageThreshold { + logDebugf("Moving circuit breaker to open") + atomic.StoreUint32(&lcb.state, circuitBreakerStateOpen) + atomic.StoreInt64(&lcb.openedAt, time.Now().UnixNano()) + } +} + +func (lcb *lazyCircuitBreaker) maybeResetRollingWindow() { + now := time.Now().UnixNano() + if (now - atomic.LoadInt64(&lcb.windowStart)) <= lcb.rollingWindow { + return + } + + atomic.StoreInt64(&lcb.windowStart, now) + atomic.StoreInt64(&lcb.total, 0) + atomic.StoreInt64(&lcb.failed, 0) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/clusteragent.go b/vendor/github.com/couchbase/gocbcore/v9/clusteragent.go new file mode 100644 index 000000000000..85391becb55b --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/clusteragent.go @@ 
-0,0 +1,223 @@ +package gocbcore + +import ( + "fmt" + "sync" + "time" +) + +type clusterAgent struct { + tlsConfig *dynTLSConfig + defaultRetryStrategy RetryStrategy + + httpMux *httpMux + tracer *tracerComponent + http *httpComponent + diagnostics *diagnosticsComponent + n1ql *n1qlQueryComponent + analytics *analyticsQueryComponent + search *searchQueryComponent + views *viewQueryComponent + + revLock sync.Mutex + revID int64 + + configWatchLock sync.Mutex + configWatchers []routeConfigWatcher +} + +func createClusterAgent(config *clusterAgentConfig) *clusterAgent { + var tlsConfig *dynTLSConfig + if config.UseTLS { + tlsConfig = createTLSConfig(config.Auth, config.TLSRootCAProvider) + } + + httpCli := createHTTPClient(config.HTTPMaxIdleConns, config.HTTPMaxIdleConnsPerHost, + config.HTTPIdleConnectionTimeout, tlsConfig) + + tracer := config.Tracer + if tracer == nil { + tracer = noopTracer{} + } + tracerCmpt := newTracerComponent(tracer, "", config.NoRootTraceSpans) + + c := &clusterAgent{ + tlsConfig: tlsConfig, + tracer: tracerCmpt, + + defaultRetryStrategy: config.DefaultRetryStrategy, + } + if c.defaultRetryStrategy == nil { + c.defaultRetryStrategy = newFailFastRetryStrategy() + } + + circuitBreakerConfig := config.CircuitBreakerConfig + auth := config.Auth + userAgent := config.UserAgent + + var httpEpList []string + for _, hostPort := range config.HTTPAddrs { + if c.tlsConfig == nil { + httpEpList = append(httpEpList, fmt.Sprintf("http://%s", hostPort)) + } else { + httpEpList = append(httpEpList, fmt.Sprintf("https://%s", hostPort)) + } + } + + c.httpMux = newHTTPMux(circuitBreakerConfig, c) + c.http = newHTTPComponent( + httpComponentProps{ + UserAgent: userAgent, + DefaultRetryStrategy: c.defaultRetryStrategy, + }, + httpCli, + c.httpMux, + auth, + c.tracer, + ) + c.n1ql = newN1QLQueryComponent(c.http, c, c.tracer) + c.analytics = newAnalyticsQueryComponent(c.http, c.tracer) + c.search = newSearchQueryComponent(c.http, c.tracer) + c.views = 
newViewQueryComponent(c.http, c.tracer) + // diagnostics at this level will never need to hook KV. There are no persistent connections + // so Diagnostics calls should be blocked. Ping and WaitUntilReady will only try HTTP services. + c.diagnostics = newDiagnosticsComponent(nil, c.httpMux, c.http, "", c.defaultRetryStrategy, nil) + + // Kick everything off. + cfg := &routeConfig{ + mgmtEpList: httpEpList, + revID: -1, + } + + c.httpMux.OnNewRouteConfig(cfg) + + return c +} + +func (agent *clusterAgent) RegisterWith(cfgMgr configManager) { + cfgMgr.AddConfigWatcher(agent) +} + +func (agent *clusterAgent) UnregisterWith(cfgMgr configManager) { + cfgMgr.RemoveConfigWatcher(agent) +} + +func (agent *clusterAgent) AddConfigWatcher(watcher routeConfigWatcher) { + agent.configWatchLock.Lock() + agent.configWatchers = append(agent.configWatchers, watcher) + agent.configWatchLock.Unlock() +} + +func (agent *clusterAgent) RemoveConfigWatcher(watcher routeConfigWatcher) { + var idx int + agent.configWatchLock.Lock() + for i, w := range agent.configWatchers { + if w == watcher { + idx = i + } + } + + if idx == len(agent.configWatchers) { + agent.configWatchers = agent.configWatchers[:idx] + } else { + agent.configWatchers = append(agent.configWatchers[:idx], agent.configWatchers[idx+1:]...) + } + agent.configWatchLock.Unlock() +} + +func (agent *clusterAgent) OnNewRouteConfig(cfg *routeConfig) { + agent.revLock.Lock() + // This could be coming from multiple agents so we need to make sure that it's up to date with what we've seen. + if cfg.revID <= agent.revID { + agent.revLock.Unlock() + return + } + + logDebugf("Cluster agent applying config rev id: %d\n", cfg.revID) + + agent.revID = cfg.revID + agent.revLock.Unlock() + agent.configWatchLock.Lock() + watchers := agent.configWatchers + agent.configWatchLock.Unlock() + + for _, watcher := range watchers { + watcher.OnNewRouteConfig(cfg) + } +} + +// N1QLQuery executes a N1QL query against a random connected agent. 
+func (agent *clusterAgent) N1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) { + return agent.n1ql.N1QLQuery(opts, cb) +} + +// PreparedN1QLQuery executes a prepared N1QL query against a random connected agent. +func (agent *clusterAgent) PreparedN1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) { + return agent.n1ql.PreparedN1QLQuery(opts, cb) +} + +// AnalyticsQuery executes an analytics query against a random connected agent. +func (agent *clusterAgent) AnalyticsQuery(opts AnalyticsQueryOptions, cb AnalyticsQueryCallback) (PendingOp, error) { + return agent.analytics.AnalyticsQuery(opts, cb) +} + +// SearchQuery executes a Search query against a random connected agent. +func (agent *clusterAgent) SearchQuery(opts SearchQueryOptions, cb SearchQueryCallback) (PendingOp, error) { + return agent.search.SearchQuery(opts, cb) +} + +// ViewQuery executes a view query against a random connected agent. +func (agent *clusterAgent) ViewQuery(opts ViewQueryOptions, cb ViewQueryCallback) (PendingOp, error) { + return agent.views.ViewQuery(opts, cb) +} + +// DoHTTPRequest will perform an HTTP request against one of the HTTP +// services which are available within the SDK, using a random connected agent. +func (agent *clusterAgent) DoHTTPRequest(req *HTTPRequest, cb DoHTTPRequestCallback) (PendingOp, error) { + return agent.http.DoHTTPRequest(req, cb) +} + +// Ping pings all of the servers we are connected to and returns +// a report regarding the pings that were performed. 
+func (agent *clusterAgent) Ping(opts PingOptions, cb PingCallback) (PendingOp, error) { + for _, srv := range opts.ServiceTypes { + if srv == MemdService { + return nil, wrapError(errInvalidArgument, "memd service is not valid for use with clusterAgent") + } else if srv == CapiService { + return nil, wrapError(errInvalidArgument, "capi service is not valid for use with clusterAgent") + } + } + + if len(opts.ServiceTypes) == 0 { + opts.ServiceTypes = []ServiceType{CbasService, FtsService, N1qlService, MgmtService} + opts.ignoreMissingServices = true + } + + return agent.diagnostics.Ping(opts, cb) +} + +// WaitUntilReady returns whether or not the Agent has seen a valid cluster config. +func (agent *clusterAgent) WaitUntilReady(deadline time.Time, opts WaitUntilReadyOptions, cb WaitUntilReadyCallback) (PendingOp, error) { + for _, srv := range opts.ServiceTypes { + if srv == MemdService { + return nil, wrapError(errInvalidArgument, "memd service is not valid for use with clusterAgent") + } else if srv == CapiService { + return nil, wrapError(errInvalidArgument, "capi service is not valid for use with clusterAgent") + } + } + + if len(opts.ServiceTypes) == 0 { + opts.ServiceTypes = []ServiceType{CbasService, FtsService, N1qlService, MgmtService} + } + + return agent.diagnostics.WaitUntilReady(deadline, opts, cb) +} + +// Close shuts down the agent, closing the underlying http client. This does not cause the agent +// to unregister itself with any configuration providers so be sure to do that first. +func (agent *clusterAgent) Close() error { + // Close the transports so that they don't hold open goroutines. 
+ agent.http.Close() + + return nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/clusteragent_config.go b/vendor/github.com/couchbase/gocbcore/v9/clusteragent_config.go new file mode 100644 index 000000000000..d9af3c3855df --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/clusteragent_config.go @@ -0,0 +1,41 @@ +package gocbcore + +import ( + "crypto/x509" + "time" +) + +type clusterAgentConfig struct { + HTTPAddrs []string + UserAgent string + UseTLS bool + Auth AuthProvider + + TLSRootCAProvider func() *x509.CertPool + + HTTPMaxIdleConns int + HTTPMaxIdleConnsPerHost int + HTTPIdleConnectionTimeout time.Duration + + // Volatile: Tracer API is subject to change. + Tracer RequestTracer + NoRootTraceSpans bool + + DefaultRetryStrategy RetryStrategy + CircuitBreakerConfig CircuitBreakerConfig +} + +func (config *clusterAgentConfig) redacted() interface{} { + newConfig := clusterAgentConfig{} + newConfig = *config + if isLogRedactionLevelFull() { + // The slices here are still pointing at config's underlying arrays + // so we need to make them not do that. + newConfig.HTTPAddrs = append([]string(nil), newConfig.HTTPAddrs...) + for i, addr := range newConfig.HTTPAddrs { + newConfig.HTTPAddrs[i] = redactSystemData(addr) + } + } + + return newConfig +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/collections.go b/vendor/github.com/couchbase/gocbcore/v9/collections.go new file mode 100644 index 000000000000..d402f10f91e7 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/collections.go @@ -0,0 +1,117 @@ +package gocbcore + +import ( + "encoding/json" + "strconv" +) + +const ( + unknownCid = uint32(0xFFFFFFFF) + pendingCid = uint32(0xFFFFFFFE) +) + +// ManifestCollection is the representation of a collection within a manifest. +type ManifestCollection struct { + UID uint32 + Name string +} + +// UnmarshalJSON is a custom implementation of json unmarshaling. 
+func (item *ManifestCollection) UnmarshalJSON(data []byte) error { + decData := struct { + UID string `json:"uid"` + Name string `json:"name"` + }{} + if err := json.Unmarshal(data, &decData); err != nil { + return err + } + + decUID, err := strconv.ParseUint(decData.UID, 16, 32) + if err != nil { + return err + } + + item.UID = uint32(decUID) + item.Name = decData.Name + return nil +} + +// ManifestScope is the representation of a scope within a manifest. +type ManifestScope struct { + UID uint32 + Name string + Collections []ManifestCollection +} + +// UnmarshalJSON is a custom implementation of json unmarshaling. +func (item *ManifestScope) UnmarshalJSON(data []byte) error { + decData := struct { + UID string `json:"uid"` + Name string `json:"name"` + Collections []ManifestCollection `json:"collections"` + }{} + if err := json.Unmarshal(data, &decData); err != nil { + return err + } + + decUID, err := strconv.ParseUint(decData.UID, 16, 32) + if err != nil { + return err + } + + item.UID = uint32(decUID) + item.Name = decData.Name + item.Collections = decData.Collections + return nil +} + +// Manifest is the representation of a collections manifest. +type Manifest struct { + UID uint64 + Scopes []ManifestScope +} + +// UnmarshalJSON is a custom implementation of json unmarshaling. +func (item *Manifest) UnmarshalJSON(data []byte) error { + decData := struct { + UID string `json:"uid"` + Scopes []ManifestScope `json:"scopes"` + }{} + if err := json.Unmarshal(data, &decData); err != nil { + return err + } + + decUID, err := strconv.ParseUint(decData.UID, 16, 64) + if err != nil { + return err + } + + item.UID = decUID + item.Scopes = decData.Scopes + return nil +} + +// GetCollectionManifestOptions are the options available to the GetCollectionManifest command. +type GetCollectionManifestOptions struct { + // Volatile: Tracer API is subject to change. 
+ TraceContext RequestSpanContext + RetryStrategy RetryStrategy +} + +// GetCollectionIDOptions are the options available to the GetCollectionID command. +type GetCollectionIDOptions struct { + RetryStrategy RetryStrategy + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// GetCollectionIDResult encapsulates the result of a GetCollectionID operation. +type GetCollectionIDResult struct { + ManifestID uint64 + CollectionID uint32 +} + +// GetCollectionManifestResult encapsulates the result of a GetCollectionManifest operation. +type GetCollectionManifestResult struct { + Manifest []byte +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/collectionscomponent.go b/vendor/github.com/couchbase/gocbcore/v9/collectionscomponent.go new file mode 100644 index 000000000000..3548cae76c39 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/collectionscomponent.go @@ -0,0 +1,443 @@ +package gocbcore + +import ( + "encoding/binary" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +func (cidMgr *collectionsComponent) createKey(scopeName, collectionName string) string { + return fmt.Sprintf("%s.%s", scopeName, collectionName) +} + +type collectionsComponent struct { + idMap map[string]*collectionIDCache + mapLock sync.Mutex + dispatcher dispatcher + maxQueueSize int + tracer tracerManager + defaultRetryStrategy RetryStrategy + cfgMgr configManager + + // pendingOpQueue is used when collections are enabled but we've not yet seen a cluster config to confirm + // whether or not collections are supported. 
+ pendingOpQueue *memdOpQueue + configSeen uint32 +} + +type collectionIDProps struct { + MaxQueueSize int + DefaultRetryStrategy RetryStrategy +} + +func newCollectionIDManager(props collectionIDProps, dispatcher dispatcher, tracer tracerManager, + cfgMgr configManager) *collectionsComponent { + cidMgr := &collectionsComponent{ + dispatcher: dispatcher, + idMap: make(map[string]*collectionIDCache), + maxQueueSize: props.MaxQueueSize, + tracer: tracer, + defaultRetryStrategy: props.DefaultRetryStrategy, + cfgMgr: cfgMgr, + pendingOpQueue: newMemdOpQueue(), + } + + cfgMgr.AddConfigWatcher(cidMgr) + dispatcher.SetPostCompleteErrorHandler(cidMgr.handleOpRoutingResp) + + return cidMgr +} + +func (cidMgr *collectionsComponent) OnNewRouteConfig(cfg *routeConfig) { + if !atomic.CompareAndSwapUint32(&cidMgr.configSeen, 0, 1) { + return + } + + colsSupported := cfg.ContainsBucketCapability("collections") + cidMgr.cfgMgr.RemoveConfigWatcher(cidMgr) + cidMgr.pendingOpQueue.Close() + cidMgr.pendingOpQueue.Drain(func(request *memdQRequest) { + // Anything in this queue is here because collections were present so if we definitely don't support collections + // then fail them. + if !colsSupported { + request.tryCallback(nil, errCollectionsUnsupported) + return + } + cidMgr.requeue(request) + }) +} + +func (cidMgr *collectionsComponent) handleCollectionUnknown(req *memdQRequest) bool { + // We cannot retry requests with no collection information. + // This also prevents the GetCollectionID requests from being automatically retried. 
+ if req.CollectionName == "" && req.ScopeName == "" { + return false + } + + shouldRetry, retryTime := retryOrchMaybeRetry(req, KVCollectionOutdatedRetryReason) + if shouldRetry { + go func() { + time.Sleep(time.Until(retryTime)) + cidMgr.requeue(req) + }() + } + + return shouldRetry +} + +func (cidMgr *collectionsComponent) handleOpRoutingResp(resp *memdQResponse, req *memdQRequest, err error) (bool, error) { + if errors.Is(err, ErrCollectionNotFound) { + if cidMgr.handleCollectionUnknown(req) { + return true, nil + } + } + + return false, err +} + +func (cidMgr *collectionsComponent) GetCollectionManifest(opts GetCollectionManifestOptions, cb GetCollectionManifestCallback) (PendingOp, error) { + tracer := cidMgr.tracer.CreateOpTrace("GetCollectionManifest", opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + cb(nil, err) + tracer.Finish() + return + } + + res := GetCollectionManifestResult{ + Manifest: resp.Value, + } + + tracer.Finish() + cb(&res, nil) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = cidMgr.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdCollectionsGetManifest, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: nil, + Value: nil, + }, + Callback: handler, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: opts.TraceContext, + } + + return cidMgr.dispatcher.DispatchDirect(req) +} + +// GetCollectionID does not trigger retries on unknown collection. This is because the request sets the scope and collection +// name in the key rather than in the corresponding fields. 
+func (cidMgr *collectionsComponent) GetCollectionID(scopeName string, collectionName string, opts GetCollectionIDOptions, + cb GetCollectionIDCallback) (PendingOp, error) { + tracer := cidMgr.tracer.CreateOpTrace("GetCollectionID", opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + cidCache, ok := cidMgr.get(scopeName, collectionName) + if !ok { + cidCache = cidMgr.newCollectionIDCache(scopeName, collectionName) + cidMgr.add(cidCache, scopeName, collectionName) + } + + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + manifestID := binary.BigEndian.Uint64(resp.Extras[0:]) + collectionID := binary.BigEndian.Uint32(resp.Extras[8:]) + + cidCache.lock.Lock() + cidCache.setID(collectionID) + cidCache.lock.Unlock() + + res := GetCollectionIDResult{ + ManifestID: manifestID, + CollectionID: collectionID, + } + + tracer.Finish() + cb(&res, nil) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = cidMgr.defaultRetryStrategy + } + + keyScopeName := scopeName + if keyScopeName == "" { + keyScopeName = "_default" + } + keyCollectionName := collectionName + if keyCollectionName == "" { + keyCollectionName = "_default" + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdCollectionsGetID, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: []byte(fmt.Sprintf("%s.%s", keyScopeName, keyCollectionName)), + Value: nil, + Vbucket: 0, + }, + ReplicaIdx: -1, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: opts.TraceContext, + } + + req.Callback = handler + + return cidMgr.dispatcher.DispatchDirect(req) +} + +func (cidMgr *collectionsComponent) add(id *collectionIDCache, scopeName, collectionName string) { + key := cidMgr.createKey(scopeName, collectionName) + cidMgr.mapLock.Lock() + cidMgr.idMap[key] = id + cidMgr.mapLock.Unlock() +} + +func (cidMgr *collectionsComponent) get(scopeName, collectionName string) (*collectionIDCache, bool) { + cidMgr.mapLock.Lock() + id, 
ok := cidMgr.idMap[cidMgr.createKey(scopeName, collectionName)] + cidMgr.mapLock.Unlock() + if !ok { + return nil, false + } + + return id, true +} + +func (cidMgr *collectionsComponent) remove(scopeName, collectionName string) { + logDebugf("Removing cache entry for", scopeName, collectionName) + cidMgr.mapLock.Lock() + delete(cidMgr.idMap, cidMgr.createKey(scopeName, collectionName)) + cidMgr.mapLock.Unlock() +} + +func (cidMgr *collectionsComponent) newCollectionIDCache(scope, collection string) *collectionIDCache { + return &collectionIDCache{ + dispatcher: cidMgr.dispatcher, + maxQueueSize: cidMgr.maxQueueSize, + parent: cidMgr, + scopeName: scope, + collectionName: collection, + } +} + +type collectionIDCache struct { + opQueue *memdOpQueue + id uint32 + collectionName string + scopeName string + parent *collectionsComponent + dispatcher dispatcher + lock sync.Mutex + maxQueueSize int +} + +func (cid *collectionIDCache) sendWithCid(req *memdQRequest) error { + cid.lock.Lock() + req.CollectionID = cid.id + cid.lock.Unlock() + _, err := cid.dispatcher.DispatchDirect(req) + if err != nil { + return err + } + + return nil +} + +func (cid *collectionIDCache) queueRequest(req *memdQRequest) error { + cid.lock.Lock() + defer cid.lock.Unlock() + return cid.opQueue.Push(req, cid.maxQueueSize) +} + +func (cid *collectionIDCache) setID(id uint32) { + logDebugf("Setting cache ID to %d for %s.%s", id, cid.scopeName, cid.collectionName) + cid.id = id +} + +func (cid *collectionIDCache) refreshCid(req *memdQRequest) error { + err := cid.opQueue.Push(req, cid.maxQueueSize) + if err != nil { + return err + } + + logDebugf("Refreshing collection ID for %s.%s", req.ScopeName, req.CollectionName) + _, err = cid.parent.GetCollectionID(req.ScopeName, req.CollectionName, GetCollectionIDOptions{TraceContext: req.RootTraceContext}, + func(result *GetCollectionIDResult, err error) { + if err != nil { + if errors.Is(err, ErrCollectionNotFound) { + // The collection is unknown so we 
need to mark the cid unknown and attempt to retry the request. + // Retrying the request will requeue it in the cid manager so either it will pick up the unknown cid + // and cause a refresh or another request will and this one will get queued within the cache. + // Either the collection will eventually come online or this request will timeout. + logDebugf("Collection %s.%s not found, attempting retry", req.ScopeName, req.CollectionName) + cid.lock.Lock() + cid.setID(unknownCid) + cid.lock.Unlock() + if cid.opQueue.Remove(req) { + if cid.parent.handleCollectionUnknown(req) { + return + } + } else { + logDebugf("Request no longer existed in op queue, possibly cancelled?", + req.Opaque, req.CollectionName) + } + } else { + logDebugf("Collection ID refresh failed: %v", err) + } + + // There was an error getting this collection ID so lets remove the cache from the manager and try to + // callback on all of the queued requests. + cid.parent.remove(req.ScopeName, req.CollectionName) + cid.opQueue.Close() + cid.opQueue.Drain(func(request *memdQRequest) { + request.tryCallback(nil, err) + }) + return + } + + // We successfully got the cid, the GetCollectionID itself will have handled setting the ID on this cache, + // so lets reset the op queue and requeue all of our requests. 
+ logDebugf("Collection %s.%s refresh succeeded, requeuing requests", req.ScopeName, req.CollectionName) + cid.lock.Lock() + opQueue := cid.opQueue + cid.opQueue = newMemdOpQueue() + cid.lock.Unlock() + + opQueue.Close() + opQueue.Drain(func(request *memdQRequest) { + request.CollectionID = result.CollectionID + cid.dispatcher.RequeueDirect(request, false) + }) + }, + ) + + return err +} + +func (cid *collectionIDCache) dispatch(req *memdQRequest) error { + cid.lock.Lock() + // if the cid is unknown then mark the request pending and refresh cid first + // if it's pending then queue the request + // otherwise send the request + switch cid.id { + case unknownCid: + logDebugf("Collection %s.%s unknown, refreshing id", req.ScopeName, req.CollectionName) + cid.setID(pendingCid) + cid.opQueue = newMemdOpQueue() + + // We attempt to send the refresh inside of the lock, that way we haven't released the lock and allowed an op + // to get queued if we need to move the status back to unknown. Without doing this it's possible for one or + // more op(s) to sneak into the queue and then no more requests come in and those sit in the queue until they + // timeout because nothing is triggering the cid refresh. + err := cid.refreshCid(req) + if err != nil { + // We've failed to send the cid refresh so we need to set it back to unknown otherwise it'll never + // get updated. 
+ cid.setID(unknownCid) + cid.lock.Unlock() + return err + } + cid.lock.Unlock() + return nil + case pendingCid: + logDebugf("Collection %s.%s pending, queueing request", req.ScopeName, req.CollectionName) + cid.lock.Unlock() + return cid.queueRequest(req) + default: + cid.lock.Unlock() + return cid.sendWithCid(req) + } +} + +func (cidMgr *collectionsComponent) Dispatch(req *memdQRequest) (PendingOp, error) { + noCollection := req.CollectionName == "" && req.ScopeName == "" + defaultCollection := req.CollectionName == "_default" && req.ScopeName == "_default" + collectionIDPresent := req.CollectionID > 0 + + // If the user didn't enable collections then we can just not bother with any collections logic. + if !cidMgr.dispatcher.CollectionsEnabled() { + if !(noCollection || defaultCollection) || collectionIDPresent { + return nil, errCollectionsUnsupported + } + _, err := cidMgr.dispatcher.DispatchDirect(req) + if err != nil { + return nil, err + } + + return req, nil + } + + if noCollection || defaultCollection || collectionIDPresent { + return cidMgr.dispatcher.DispatchDirect(req) + } + + if atomic.LoadUint32(&cidMgr.configSeen) == 0 { + logDebugf("Collections are enabled but we've not yet seen a config so queueing request") + err := cidMgr.pendingOpQueue.Push(req, cidMgr.maxQueueSize) + if err != nil { + return nil, err + } + + return req, nil + } + + if !cidMgr.dispatcher.SupportsCollections() { + return nil, errCollectionsUnsupported + } + + cidCache, ok := cidMgr.get(req.ScopeName, req.CollectionName) + if !ok { + cidCache = cidMgr.newCollectionIDCache(req.ScopeName, req.CollectionName) + cidCache.setID(unknownCid) + cidMgr.add(cidCache, req.ScopeName, req.CollectionName) + } + err := cidCache.dispatch(req) + if err != nil { + return nil, err + } + + return req, nil +} + +func (cidMgr *collectionsComponent) requeue(req *memdQRequest) { + cidCache, ok := cidMgr.get(req.ScopeName, req.CollectionName) + if !ok { + cidCache = 
cidMgr.newCollectionIDCache(req.ScopeName, req.CollectionName) + cidCache.setID(unknownCid) + cidMgr.add(cidCache, req.ScopeName, req.CollectionName) + } + cidCache.lock.Lock() + if cidCache.id != unknownCid && cidCache.id != pendingCid { + cidCache.setID(unknownCid) + } + cidCache.lock.Unlock() + + err := cidCache.dispatch(req) + if err != nil { + req.tryCallback(nil, err) + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/commonflags.go b/vendor/github.com/couchbase/gocbcore/v9/commonflags.go new file mode 100644 index 000000000000..44adcd69cd74 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/commonflags.go @@ -0,0 +1,111 @@ +package gocbcore + +const ( + // Legacy flag format for JSON data. + lfJSON = 0 + + // Common flags mask + cfMask = 0xFF000000 + // Common flags mask for data format + cfFmtMask = 0x0F000000 + // Common flags mask for compression mode. + cfCmprMask = 0xE0000000 + + // Common flag format for sdk-private data. + cfFmtPrivate = 1 << 24 // nolint: deadcode,varcheck,unused + // Common flag format for JSON data. + cfFmtJSON = 2 << 24 + // Common flag format for binary data. + cfFmtBinary = 3 << 24 + // Common flag format for string data. + cfFmtString = 4 << 24 + + // Common flags compression for disabled compression. + cfCmprNone = 0 << 29 +) + +// DataType represents the type of data for a value +type DataType uint32 + +// CompressionType indicates the type of compression for a value +type CompressionType uint32 + +const ( + // UnknownType indicates the values type is unknown. + UnknownType = DataType(0) + + // JSONType indicates the value is JSON data. + JSONType = DataType(1) + + // BinaryType indicates the value is binary data. + BinaryType = DataType(2) + + // StringType indicates the value is string data. + StringType = DataType(3) +) + +const ( + // UnknownCompression indicates that the compression type is unknown. 
+ UnknownCompression = CompressionType(0) + + // NoCompression indicates that no compression is being used. + NoCompression = CompressionType(1) +) + +// EncodeCommonFlags encodes a data type and compression type into a flags +// value using the common flags specification. +func EncodeCommonFlags(valueType DataType, compression CompressionType) uint32 { + var flags uint32 + + switch valueType { + case JSONType: + flags |= cfFmtJSON + case BinaryType: + flags |= cfFmtBinary + case StringType: + flags |= cfFmtString + case UnknownType: + // flags |= ? + } + + switch compression { + case NoCompression: + // flags |= 0 + case UnknownCompression: + // flags |= ? + } + + return flags +} + +// DecodeCommonFlags decodes a flags value into a data type and compression type +// using the common flags specification. +func DecodeCommonFlags(flags uint32) (DataType, CompressionType) { + // Check for legacy flags + if flags&cfMask == 0 { + // Legacy Flags + if flags == lfJSON { + // Legacy JSON + flags = cfFmtJSON + } else { + return UnknownType, UnknownCompression + } + } + + valueType := UnknownType + compression := UnknownCompression + + if flags&cfFmtMask == cfFmtBinary { + valueType = BinaryType + } else if flags&cfFmtMask == cfFmtString { + valueType = StringType + } else if flags&cfFmtMask == cfFmtJSON { + valueType = JSONType + } + + if flags&cfCmprMask == cfCmprNone { + compression = NoCompression + } + + return valueType, compression +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/config.go b/vendor/github.com/couchbase/gocbcore/v9/config.go new file mode 100644 index 000000000000..540ccabeba5f --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/config.go @@ -0,0 +1,314 @@ +package gocbcore + +import ( + "encoding/json" + "fmt" + "net" + "strings" +) + +// A Node is a computer in a cluster running the couchbase software. 
+type cfgNode struct { + ClusterCompatibility int `json:"clusterCompatibility"` + ClusterMembership string `json:"clusterMembership"` + CouchAPIBase string `json:"couchApiBase"` + Hostname string `json:"hostname"` + InterestingStats map[string]float64 `json:"interestingStats,omitempty"` + MCDMemoryAllocated float64 `json:"mcdMemoryAllocated"` + MCDMemoryReserved float64 `json:"mcdMemoryReserved"` + MemoryFree float64 `json:"memoryFree"` + MemoryTotal float64 `json:"memoryTotal"` + OS string `json:"os"` + Ports map[string]int `json:"ports"` + Status string `json:"status"` + Uptime int `json:"uptime,string"` + Version string `json:"version"` + ThisNode bool `json:"thisNode,omitempty"` +} + +type cfgNodeServices struct { + Kv uint16 `json:"kv"` + Capi uint16 `json:"capi"` + Mgmt uint16 `json:"mgmt"` + N1ql uint16 `json:"n1ql"` + Fts uint16 `json:"fts"` + Cbas uint16 `json:"cbas"` + KvSsl uint16 `json:"kvSSL"` + CapiSsl uint16 `json:"capiSSL"` + MgmtSsl uint16 `json:"mgmtSSL"` + N1qlSsl uint16 `json:"n1qlSSL"` + FtsSsl uint16 `json:"ftsSSL"` + CbasSsl uint16 `json:"cbasSSL"` +} + +type cfgNodeAltAddress struct { + Ports *cfgNodeServices `json:"ports,omitempty"` + Hostname string `json:"hostname"` +} + +type cfgNodeExt struct { + Services cfgNodeServices `json:"services"` + Hostname string `json:"hostname"` + AltAddresses map[string]cfgNodeAltAddress `json:"alternateAddresses"` +} + +// VBucketServerMap is the a mapping of vbuckets to nodes. +type cfgVBucketServerMap struct { + HashAlgorithm string `json:"hashAlgorithm"` + NumReplicas int `json:"numReplicas"` + ServerList []string `json:"serverList"` + VBucketMap [][]int `json:"vBucketMap"` +} + +// Bucket is the primary entry point for most data operations. 
+type cfgBucket struct { + Rev int64 `json:"rev"` + SourceHostname string + Capabilities []string `json:"bucketCapabilities"` + CapabilitiesVersion string `json:"bucketCapabilitiesVer"` + Name string `json:"name"` + NodeLocator string `json:"nodeLocator"` + URI string `json:"uri"` + StreamingURI string `json:"streamingUri"` + UUID string `json:"uuid"` + DDocs struct { + URI string `json:"uri"` + } `json:"ddocs,omitempty"` + + // These are used for JSON IO, but isn't used for processing + // since it needs to be swapped out safely. + VBucketServerMap cfgVBucketServerMap `json:"vBucketServerMap"` + Nodes []cfgNode `json:"nodes"` + NodesExt []cfgNodeExt `json:"nodesExt,omitempty"` + ClusterCapabilitiesVer []int `json:"clusterCapabilitiesVer,omitempty"` + ClusterCapabilities map[string][]string `json:"clusterCapabilities,omitempty"` +} + +func (cfg *cfgBucket) BuildRouteConfig(useSsl bool, networkType string, firstConnect bool) *routeConfig { + var kvServerList []string + var capiEpList []string + var mgmtEpList []string + var n1qlEpList []string + var ftsEpList []string + var cbasEpList []string + var bktType bucketType + + switch cfg.NodeLocator { + case "ketama": + bktType = bktTypeMemcached + case "vbucket": + bktType = bktTypeCouchbase + default: + if cfg.UUID == "" { + bktType = bktTypeNone + } else { + logDebugf("Invalid nodeLocator %s", cfg.NodeLocator) + bktType = bktTypeInvalid + } + } + + if cfg.NodesExt != nil { + lenNodes := len(cfg.Nodes) + for i, node := range cfg.NodesExt { + hostname := node.Hostname + ports := node.Services + + if networkType != "default" { + if altAddr, ok := node.AltAddresses[networkType]; ok { + hostname = altAddr.Hostname + if altAddr.Ports != nil { + ports = *altAddr.Ports + } + } else { + if !firstConnect { + logDebugf("Invalid config network type %s", networkType) + } + continue + } + } + + hostname = getHostname(hostname, cfg.SourceHostname) + + endpoints := endpointsFromPorts(useSsl, ports, cfg.Name, hostname) + if 
endpoints.kvServer != "" { + if bktType > bktTypeInvalid && i >= lenNodes { + logDebugf("KV node present in nodesext but not in nodes for %s", endpoints.kvServer) + } else { + kvServerList = append(kvServerList, endpoints.kvServer) + } + } + if endpoints.capiEp != "" { + capiEpList = append(capiEpList, endpoints.capiEp) + } + if endpoints.mgmtEp != "" { + mgmtEpList = append(mgmtEpList, endpoints.mgmtEp) + } + if endpoints.n1qlEp != "" { + n1qlEpList = append(n1qlEpList, endpoints.n1qlEp) + } + if endpoints.ftsEp != "" { + ftsEpList = append(ftsEpList, endpoints.ftsEp) + } + if endpoints.cbasEp != "" { + cbasEpList = append(cbasEpList, endpoints.cbasEp) + } + } + } else { + if useSsl { + logErrorf("Received config without nodesExt while SSL is enabled. Generating invalid config.") + return &routeConfig{} + } + + if bktType == bktTypeCouchbase { + kvServerList = cfg.VBucketServerMap.ServerList + } + + for _, node := range cfg.Nodes { + if node.CouchAPIBase != "" { + // Slice off the UUID as Go's HTTP client cannot handle being passed URL-Encoded path values. + capiEp := strings.SplitN(node.CouchAPIBase, "%2B", 2)[0] + + capiEpList = append(capiEpList, capiEp) + } + if node.Hostname != "" { + mgmtEpList = append(mgmtEpList, fmt.Sprintf("http://%s", node.Hostname)) + } + + if bktType == bktTypeMemcached { + // Get the data port. No VBucketServerMap. + host, err := hostFromHostPort(node.Hostname) + if err != nil { + logErrorf("Encountered invalid memcached host/port string. 
Ignoring node.") + continue + } + + curKvHost := fmt.Sprintf("%s:%d", host, node.Ports["direct"]) + kvServerList = append(kvServerList, curKvHost) + } + } + } + + rc := &routeConfig{ + revID: cfg.Rev, + uuid: cfg.UUID, + name: cfg.Name, + kvServerList: kvServerList, + capiEpList: capiEpList, + mgmtEpList: mgmtEpList, + n1qlEpList: n1qlEpList, + ftsEpList: ftsEpList, + cbasEpList: cbasEpList, + bktType: bktType, + clusterCapabilities: cfg.ClusterCapabilities, + clusterCapabilitiesVer: cfg.ClusterCapabilitiesVer, + bucketCapabilities: cfg.Capabilities, + bucketCapabilitiesVer: cfg.CapabilitiesVersion, + } + + if bktType == bktTypeCouchbase { + vbMap := cfg.VBucketServerMap.VBucketMap + numReplicas := cfg.VBucketServerMap.NumReplicas + rc.vbMap = newVbucketMap(vbMap, numReplicas) + } else if bktType == bktTypeMemcached { + rc.ketamaMap = newKetamaContinuum(kvServerList) + } + + return rc +} + +type serverEps struct { + kvServer string + capiEp string + mgmtEp string + n1qlEp string + ftsEp string + cbasEp string +} + +func getHostname(hostname, sourceHostname string) string { + // Hostname blank means to use the same one as was connected to + if hostname == "" { + // Note that the SourceHostname will already be IPv6 wrapped + hostname = sourceHostname + } else { + // We need to detect an IPv6 address here and wrap it in the appropriate + // [] block to indicate its IPv6 for the rest of the system. 
+ if strings.Contains(hostname, ":") { + hostname = "[" + hostname + "]" + } + } + + return hostname +} + +func endpointsFromPorts(useSsl bool, ports cfgNodeServices, name, hostname string) *serverEps { + lists := &serverEps{} + + if useSsl { + if ports.KvSsl > 0 { + lists.kvServer = fmt.Sprintf("%s:%d", hostname, ports.KvSsl) + } + if ports.Capi > 0 { + lists.capiEp = fmt.Sprintf("https://%s:%d/%s", hostname, ports.CapiSsl, name) + } + if ports.Mgmt > 0 { + lists.mgmtEp = fmt.Sprintf("https://%s:%d", hostname, ports.MgmtSsl) + } + if ports.N1ql > 0 { + lists.n1qlEp = fmt.Sprintf("https://%s:%d", hostname, ports.N1qlSsl) + } + if ports.Fts > 0 { + lists.ftsEp = fmt.Sprintf("https://%s:%d", hostname, ports.FtsSsl) + } + if ports.Cbas > 0 { + lists.cbasEp = fmt.Sprintf("https://%s:%d", hostname, ports.CbasSsl) + } + } else { + if ports.Kv > 0 { + lists.kvServer = fmt.Sprintf("%s:%d", hostname, ports.Kv) + } + if ports.Capi > 0 { + lists.capiEp = fmt.Sprintf("http://%s:%d/%s", hostname, ports.Capi, name) + } + if ports.Mgmt > 0 { + lists.mgmtEp = fmt.Sprintf("http://%s:%d", hostname, ports.Mgmt) + } + if ports.N1ql > 0 { + lists.n1qlEp = fmt.Sprintf("http://%s:%d", hostname, ports.N1ql) + } + if ports.Fts > 0 { + lists.ftsEp = fmt.Sprintf("http://%s:%d", hostname, ports.Fts) + } + if ports.Cbas > 0 { + lists.cbasEp = fmt.Sprintf("http://%s:%d", hostname, ports.Cbas) + } + } + return lists +} + +func hostFromHostPort(hostport string) (string, error) { + host, _, err := net.SplitHostPort(hostport) + if err != nil { + return "", err + } + + // If this is an IPv6 address, we need to rewrap it in [] + if strings.Contains(host, ":") { + return "[" + host + "]", nil + } + + return host, nil +} + +func parseConfig(config []byte, srcHost string) (*cfgBucket, error) { + configStr := strings.Replace(string(config), "$HOST", srcHost, -1) + + bk := new(cfgBucket) + err := json.Unmarshal([]byte(configStr), bk) + if err != nil { + return nil, err + } + + bk.SourceHostname = srcHost 
+ return bk, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/configmanagement_component.go b/vendor/github.com/couchbase/gocbcore/v9/configmanagement_component.go new file mode 100644 index 000000000000..03ac5e8ce149 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/configmanagement_component.go @@ -0,0 +1,184 @@ +package gocbcore + +import ( + "sync" +) + +type configManagementComponent struct { + useSSL bool + networkType string + + currentConfig *routeConfig + + cfgChangeWatchers []routeConfigWatcher + watchersLock sync.Mutex + + srcServers []string + + seenConfig bool +} + +type configManagerProperties struct { + UseSSL bool + NetworkType string + SrcMemdAddrs []string + SrcHTTPAddrs []string +} + +type routeConfigWatcher interface { + OnNewRouteConfig(cfg *routeConfig) +} + +type configManager interface { + AddConfigWatcher(watcher routeConfigWatcher) + RemoveConfigWatcher(watcher routeConfigWatcher) +} + +func newConfigManager(props configManagerProperties) *configManagementComponent { + return &configManagementComponent{ + useSSL: props.UseSSL, + networkType: props.NetworkType, + srcServers: append(props.SrcMemdAddrs, props.SrcHTTPAddrs...), + currentConfig: &routeConfig{ + revID: -1, + }, + } +} + +func (cm *configManagementComponent) OnNewConfig(cfg *cfgBucket) { + var routeCfg *routeConfig + if cm.seenConfig { + routeCfg = cfg.BuildRouteConfig(cm.useSSL, cm.networkType, false) + } else { + routeCfg = cm.buildFirstRouteConfig(cfg) + logDebugf("Using network type %s for connections", cm.networkType) + } + if !routeCfg.IsValid() { + logDebugf("Routing data is not valid, skipping update: \n%s", routeCfg.DebugString()) + return + } + + // There's something wrong with this route config so don't send it to the watchers. 
+ if !cm.updateRouteConfig(routeCfg) { + return + } + + logDebugf("Sending out mux routing data (update)...") + logDebugf("New Routing Data:\n%s", routeCfg.DebugString()) + + cm.seenConfig = true + + // We can end up deadlocking if we iterate whilst in the lock and a watcher decides to remove itself. + cm.watchersLock.Lock() + watchers := make([]routeConfigWatcher, len(cm.cfgChangeWatchers)) + copy(watchers, cm.cfgChangeWatchers) + cm.watchersLock.Unlock() + + for _, watcher := range watchers { + watcher.OnNewRouteConfig(routeCfg) + } +} + +func (cm *configManagementComponent) AddConfigWatcher(watcher routeConfigWatcher) { + cm.watchersLock.Lock() + cm.cfgChangeWatchers = append(cm.cfgChangeWatchers, watcher) + cm.watchersLock.Unlock() +} + +func (cm *configManagementComponent) RemoveConfigWatcher(watcher routeConfigWatcher) { + var idx int + cm.watchersLock.Lock() + for i, w := range cm.cfgChangeWatchers { + if w == watcher { + idx = i + } + } + + if idx == len(cm.cfgChangeWatchers) { + cm.cfgChangeWatchers = cm.cfgChangeWatchers[:idx] + } else { + cm.cfgChangeWatchers = append(cm.cfgChangeWatchers[:idx], cm.cfgChangeWatchers[idx+1:]...) + } + cm.watchersLock.Unlock() +} + +// We should never be receiving concurrent updates and nothing should be accessing +// our internal route config so we shouldn't need to lock here. +func (cm *configManagementComponent) updateRouteConfig(cfg *routeConfig) bool { + oldCfg := cm.currentConfig + + // Check some basic things to ensure consistency! + if oldCfg.revID > -1 { + if (cfg.vbMap == nil) != (oldCfg.vbMap == nil) { + logErrorf("Received a configuration with a different number of vbuckets. Ignoring.") + return false + } + + if cfg.vbMap != nil && cfg.vbMap.NumVbuckets() != oldCfg.vbMap.NumVbuckets() { + logErrorf("Received a configuration with a different number of vbuckets. 
Ignoring.") + return false + } + } + + // Check that the new config data is newer than the current one, in the case where we've done a select bucket + // against an existing connection then the revisions could be the same. In that case the configuration still + // needs to be applied. + if cfg.revID == 0 { + logDebugf("Unversioned configuration data, switching.") + } else if cfg.bktType != oldCfg.bktType { + logDebugf("Configuration data changed bucket type, switching.") + } else if cfg.revID == oldCfg.revID { + logDebugf("Ignoring configuration with identical revision number") + return false + } else if cfg.revID < oldCfg.revID { + logDebugf("Ignoring new configuration as it has an older revision id") + return false + } + + cm.currentConfig = cfg + + return true +} + +func (cm *configManagementComponent) buildFirstRouteConfig(config *cfgBucket) *routeConfig { + if cm.networkType != "" && cm.networkType != "auto" { + return config.BuildRouteConfig(cm.useSSL, cm.networkType, true) + } + + defaultRouteConfig := config.BuildRouteConfig(cm.useSSL, "default", true) + + // Iterate over all of the source servers and check if any addresses match as default or external network types + for _, srcServer := range cm.srcServers { + // First we check if the source server is from the defaults list + srcInDefaultConfig := false + for _, endpoint := range defaultRouteConfig.kvServerList { + if endpoint == srcServer { + srcInDefaultConfig = true + } + } + for _, endpoint := range defaultRouteConfig.mgmtEpList { + if endpoint == srcServer { + srcInDefaultConfig = true + } + } + if srcInDefaultConfig { + cm.networkType = "default" + return defaultRouteConfig + } + + // Next lets see if we have an external config, if so, default to that + externalRouteCfg := config.BuildRouteConfig(cm.useSSL, "external", true) + if externalRouteCfg.IsValid() { + cm.networkType = "external" + return externalRouteCfg + } + } + + // If all else fails, default to the implicit default config + 
cm.networkType = "default" + return defaultRouteConfig +} + +func (cm *configManagementComponent) NetworkType() string { + return cm.networkType +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/configsnapshot.go b/vendor/github.com/couchbase/gocbcore/v9/configsnapshot.go new file mode 100644 index 000000000000..74035fa0ffc0 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/configsnapshot.go @@ -0,0 +1,95 @@ +package gocbcore + +// ConfigSnapshot is a snapshot of the underlying configuration currently in use. +type ConfigSnapshot struct { + state *kvMuxState +} + +// RevID returns the config revision for this snapshot. +func (pi ConfigSnapshot) RevID() int64 { + return pi.state.revID +} + +// KeyToVbucket translates a particular key to its assigned vbucket. +func (pi ConfigSnapshot) KeyToVbucket(key []byte) (uint16, error) { + if pi.state.vbMap == nil { + return 0, errUnsupportedOperation + } + return pi.state.vbMap.VbucketByKey(key), nil +} + +// KeyToServer translates a particular key to its assigned server index. +func (pi ConfigSnapshot) KeyToServer(key []byte, replicaIdx uint32) (int, error) { + if pi.state.vbMap != nil { + serverIdx, err := pi.state.vbMap.NodeByKey(key, replicaIdx) + if err != nil { + return 0, err + } + + return serverIdx, nil + } + + if pi.state.ketamaMap != nil { + serverIdx, err := pi.state.ketamaMap.NodeByKey(key) + if err != nil { + return 0, err + } + + return serverIdx, nil + } + + return 0, errCliInternalError +} + +// VbucketToServer returns the server index for a particular vbucket. +func (pi ConfigSnapshot) VbucketToServer(vbID uint16, replicaIdx uint32) (int, error) { + if pi.state.vbMap == nil { + return 0, errUnsupportedOperation + } + + serverIdx, err := pi.state.vbMap.NodeByVbucket(vbID, replicaIdx) + if err != nil { + return 0, err + } + + return serverIdx, nil +} + +// VbucketsOnServer returns the list of VBuckets for a server. 
+func (pi ConfigSnapshot) VbucketsOnServer(index int) ([]uint16, error) { + if pi.state.vbMap == nil { + return nil, errUnsupportedOperation + } + + return pi.state.vbMap.VbucketsOnServer(index) +} + +// NumVbuckets returns the number of VBuckets configured on the +// connected cluster. +func (pi ConfigSnapshot) NumVbuckets() (int, error) { + if pi.state.vbMap == nil { + return 0, errUnsupportedOperation + } + + return pi.state.vbMap.NumVbuckets(), nil +} + +// NumReplicas returns the number of replicas configured on the +// connected cluster. +func (pi ConfigSnapshot) NumReplicas() (int, error) { + if pi.state.vbMap == nil { + return 0, errUnsupportedOperation + } + + return pi.state.vbMap.NumReplicas(), nil +} + +// NumServers returns the number of servers accessible for K/V. +func (pi ConfigSnapshot) NumServers() (int, error) { + return pi.state.NumPipelines(), nil +} + +// BucketUUID returns the UUID of the bucket we are connected to. +func (pi ConfigSnapshot) BucketUUID() string { + return pi.state.uuid +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/connstr/README.md b/vendor/github.com/couchbase/gocbcore/v9/connstr/README.md new file mode 100644 index 000000000000..56e57f688fc7 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/connstr/README.md @@ -0,0 +1,45 @@ +# Couchbase Connection Strings for Go + +This library allows you to parse and resolve Couchbase Connection Strings in Go. +This is used by the Couchbase Go SDK, as well as various tools throughout the +Couchbase infrastructure. + + +## Using the Library + +To parse a connection string, simply call `Parse` with your connection string. 
+You will receive a `ConnSpec` structure representing the connection string`: + +```go +type Address struct { + Host string + Port int +} + +type ConnSpec struct { + Scheme string + Addresses []Address + Bucket string + Options map[string][]string +} +``` + +One you have a parsed connection string, you can also use our resolver to take +the `ConnSpec` and resolve any DNS SRV records as well as generate a list of +endpoints for the Couchbase server. You will receive a `ResolvedConnSpec` +structure in return: + +```go +type ResolvedConnSpec struct { + UseSsl bool + MemdHosts []Address + HttpHosts []Address + Bucket string + Options map[string][]string +} +``` + +## License +Copyright 2020 Couchbase Inc. + +Licensed under the Apache License, Version 2.0. diff --git a/vendor/github.com/couchbase/gocbcore/v9/connstr/connstr.go b/vendor/github.com/couchbase/gocbcore/v9/connstr/connstr.go new file mode 100644 index 000000000000..2912dc09af91 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/connstr/connstr.go @@ -0,0 +1,317 @@ +package connstr + +import ( + "errors" + "fmt" + "net" + "net/url" + "regexp" + "strconv" + "strings" +) + +const ( + // DefaultHttpPort is the default HTTP port to use to connect to Couchbase Server. + DefaultHttpPort = 8091 + + // DefaultSslHttpPort is the default HTTPS port to use to connect to Couchbase Server. + DefaultSslHttpPort = 18091 + + // DefaultMemdPort is the default memd port to use to connect to Couchbase Server. + DefaultMemdPort = 11210 + + // DefaultSslMemdPort is the default memd SSL port to use to connect to Couchbase Server. + DefaultSslMemdPort = 11207 +) + +func hostIsIpAddress(host string) bool { + if strings.HasPrefix(host, "[") { + // This is an IPv6 address + return true + } + if net.ParseIP(host) != nil { + // This is an IPv4 address + return true + } + return false +} + +// Address represents a host:port pair. 
+type Address struct { + Host string + Port int +} + +// ConnSpec describes a connection specification. +type ConnSpec struct { + Scheme string + Addresses []Address + Bucket string + Options map[string][]string +} + +func (spec ConnSpec) srvRecord() (string, string, string, bool) { + // Only `couchbase`-type schemes allow SRV records + if spec.Scheme != "couchbase" && spec.Scheme != "couchbases" { + return "", "", "", false + } + + // Must have only a single host, with no port specified + if len(spec.Addresses) != 1 || spec.Addresses[0].Port != -1 { + return "", "", "", false + } + + if hostIsIpAddress(spec.Addresses[0].Host) { + return "", "", "", false + } + + return spec.Scheme, "tcp", spec.Addresses[0].Host, true +} + +// SrvRecordName returns the record name for the ConnSpec. +func (spec ConnSpec) SrvRecordName() (recordName string) { + scheme, proto, host, isValid := spec.srvRecord() + if !isValid { + return "" + } + + return fmt.Sprintf("_%s._%s.%s", scheme, proto, host) +} + +// GetOption returns the specified option value for the ConnSpec. +func (spec ConnSpec) GetOption(name string) []string { + if opt, ok := spec.Options[name]; ok { + return opt + } + return nil +} + +// GetOptionString returns the specified option value for the ConnSpec. +func (spec ConnSpec) GetOptionString(name string) string { + opts := spec.GetOption(name) + if len(opts) > 0 { + return opts[0] + } + return "" +} + +// Parse parses the connection string into a ConnSpec. 
+func Parse(connStr string) (out ConnSpec, err error) { + partMatcher := regexp.MustCompile(`((.*):\/\/)?(([^\/?:]*)(:([^\/?:@]*))?@)?([^\/?]*)(\/([^\?]*))?(\?(.*))?`) + hostMatcher := regexp.MustCompile(`((\[[^\]]+\]+)|([^;\,\:]+))(:([0-9]*))?(;\,)?`) + parts := partMatcher.FindStringSubmatch(connStr) + + if parts[2] != "" { + out.Scheme = parts[2] + + switch out.Scheme { + case "couchbase": + case "couchbases": + case "http": + default: + err = errors.New("bad scheme") + return + } + } + + if parts[7] != "" { + hosts := hostMatcher.FindAllStringSubmatch(parts[7], -1) + for _, hostInfo := range hosts { + address := Address{ + Host: hostInfo[1], + Port: -1, + } + + if hostInfo[5] != "" { + address.Port, err = strconv.Atoi(hostInfo[5]) + if err != nil { + return + } + } + + out.Addresses = append(out.Addresses, address) + } + } + + if parts[9] != "" { + out.Bucket, err = url.QueryUnescape(parts[9]) + if err != nil { + return + } + } + + if parts[11] != "" { + out.Options, err = url.ParseQuery(parts[11]) + if err != nil { + return + } + } + + return +} + +func (spec ConnSpec) String() string { + var out string + + if spec.Scheme != "" { + out += fmt.Sprintf("%s://", spec.Scheme) + } + + for i, address := range spec.Addresses { + if i > 0 { + out += "," + } + + if address.Port >= 0 { + out += fmt.Sprintf("%s:%d", address.Host, address.Port) + } else { + out += address.Host + } + } + + if spec.Bucket != "" { + out += "/" + out += spec.Bucket + } + + urlOptions := url.Values(spec.Options) + if len(urlOptions) > 0 { + out += "?" + urlOptions.Encode() + } + + return out +} + +// ResolvedConnSpec is the result of resolving a ConnSpec. +type ResolvedConnSpec struct { + UseSsl bool + MemdHosts []Address + HttpHosts []Address + Bucket string + Options map[string][]string +} + +// Resolve parses a ConnSpec into a ResolvedConnSpec. 
+func Resolve(connSpec ConnSpec) (out ResolvedConnSpec, err error) { + defaultPort := 0 + hasExplicitScheme := false + isHttpScheme := false + useSsl := false + + switch connSpec.Scheme { + case "couchbase": + defaultPort = DefaultMemdPort + hasExplicitScheme = true + isHttpScheme = false + useSsl = false + case "couchbases": + defaultPort = DefaultSslMemdPort + hasExplicitScheme = true + isHttpScheme = false + useSsl = true + case "http": + defaultPort = DefaultHttpPort + hasExplicitScheme = true + isHttpScheme = true + useSsl = false + case "": + defaultPort = DefaultHttpPort + hasExplicitScheme = false + isHttpScheme = true + useSsl = false + default: + err = errors.New("bad scheme") + return + } + + var srvRecords []*net.SRV + srvScheme, srvProto, srvHost, srvIsValid := connSpec.srvRecord() + if srvIsValid { + _, addrs, err := net.LookupSRV(srvScheme, srvProto, srvHost) + if err == nil && len(addrs) > 0 { + srvRecords = addrs + } + } + + if srvRecords != nil { + for _, srv := range srvRecords { + out.MemdHosts = append(out.MemdHosts, Address{ + Host: strings.TrimSuffix(srv.Target, "."), + Port: int(srv.Port), + }) + } + } else if len(connSpec.Addresses) == 0 { + if useSsl { + out.MemdHosts = append(out.MemdHosts, Address{ + Host: "127.0.0.1", + Port: DefaultSslMemdPort, + }) + out.HttpHosts = append(out.HttpHosts, Address{ + Host: "127.0.0.1", + Port: DefaultSslHttpPort, + }) + } else { + out.MemdHosts = append(out.MemdHosts, Address{ + Host: "127.0.0.1", + Port: DefaultMemdPort, + }) + out.HttpHosts = append(out.HttpHosts, Address{ + Host: "127.0.0.1", + Port: DefaultHttpPort, + }) + } + } else { + for _, address := range connSpec.Addresses { + hasExplicitPort := address.Port > 0 + + if !hasExplicitScheme && hasExplicitPort && address.Port != defaultPort { + err = errors.New("ambiguous port without scheme") + return + } + + if hasExplicitScheme && !isHttpScheme && address.Port == DefaultHttpPort { + err = errors.New("couchbase://host:8091 not supported for 
couchbase:// scheme. Use couchbase://host") + return + } + + if address.Port <= 0 || address.Port == defaultPort || address.Port == DefaultHttpPort { + if useSsl { + out.MemdHosts = append(out.MemdHosts, Address{ + Host: address.Host, + Port: DefaultSslMemdPort, + }) + out.HttpHosts = append(out.HttpHosts, Address{ + Host: address.Host, + Port: DefaultSslHttpPort, + }) + } else { + out.MemdHosts = append(out.MemdHosts, Address{ + Host: address.Host, + Port: DefaultMemdPort, + }) + out.HttpHosts = append(out.HttpHosts, Address{ + Host: address.Host, + Port: DefaultHttpPort, + }) + } + } else { + if !isHttpScheme { + out.MemdHosts = append(out.MemdHosts, Address{ + Host: address.Host, + Port: address.Port, + }) + } else { + out.HttpHosts = append(out.HttpHosts, Address{ + Host: address.Host, + Port: address.Port, + }) + } + } + } + } + + out.UseSsl = useSsl + out.Bucket = connSpec.Bucket + out.Options = connSpec.Options + return +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/constants.go b/vendor/github.com/couchbase/gocbcore/v9/constants.go new file mode 100644 index 000000000000..fbaedad69662 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/constants.go @@ -0,0 +1,88 @@ +package gocbcore + +const ( + goCbCoreVersionStr = "v9.0.4" +) + +type bucketType int + +const ( + bktTypeNone = -1 + bktTypeInvalid bucketType = 0 + bktTypeCouchbase = iota + bktTypeMemcached = iota +) + +// ServiceType specifies a particular Couchbase service type. +type ServiceType int + +const ( + // MemdService represents a memcached service. + MemdService = ServiceType(1) + + // MgmtService represents a management service (typically ns_server). + MgmtService = ServiceType(2) + + // CapiService represents a CouchAPI service (typically for views). + CapiService = ServiceType(3) + + // N1qlService represents a N1QL service (typically for query). + N1qlService = ServiceType(4) + + // FtsService represents a full-text-search service. 
+ FtsService = ServiceType(5) + + // CbasService represents an analytics service. + CbasService = ServiceType(6) +) + +// DcpAgentPriority specifies the priority level for a dcp stream +type DcpAgentPriority uint8 + +const ( + // DcpAgentPriorityLow sets the priority for the dcp stream to low + DcpAgentPriorityLow = DcpAgentPriority(0) + + // DcpAgentPriorityMed sets the priority for the dcp stream to medium + DcpAgentPriorityMed = DcpAgentPriority(1) + + // DcpAgentPriorityHigh sets the priority for the dcp stream to high + DcpAgentPriorityHigh = DcpAgentPriority(2) +) + +type durabilityLevelStatus uint32 + +const ( + durabilityLevelStatusUnknown = durabilityLevelStatus(0x00) + durabilityLevelStatusSupported = durabilityLevelStatus(0x01) + durabilityLevelStatusUnsupported = durabilityLevelStatus(0x02) +) + +type createAsDeletedStatus uint32 + +const ( + createAsDeletedStatusUnknown = createAsDeletedStatus(0x00) + createAsDeletedStatusSupported = createAsDeletedStatus(0x01) + createAsDeletedStatusUnsupported = createAsDeletedStatus(0x02) +) + +// ClusterCapability represents a capability that the cluster supports +type ClusterCapability uint32 + +const ( + // ClusterCapabilityEnhancedPreparedStatements represents that the cluster supports enhanced prepared statements. + ClusterCapabilityEnhancedPreparedStatements = ClusterCapability(0x01) +) + +// DCPBackfillOrder represents the order in which vBuckets will be backfilled by the cluster. +type DCPBackfillOrder uint8 + +const ( + // DCPBackfillOrderRoundRobin means that all the requested vBuckets will be backfilled together where each vBucket + // has some data backfilled before moving on to the next. This is the default behaviour. + DCPBackfillOrderRoundRobin DCPBackfillOrder = iota + 1 + + // DCPBackfillOrderSequential means that all the data for the first vBucket will be streamed before advancing onto + // the next vBucket. 
+ DCPBackfillOrderSequential +) diff --git a/vendor/github.com/couchbase/gocbcore/v9/crud.go b/vendor/github.com/couchbase/gocbcore/v9/crud.go new file mode 100644 index 000000000000..01caf72972f9 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/crud.go @@ -0,0 +1,19 @@ +package gocbcore + +// Cas represents a unique revision of a document. This can be used +// to perform optimistic locking. +type Cas uint64 + +// VbUUID represents a unique identifier for a particular vbucket history. +type VbUUID uint64 + +// SeqNo is a sequential mutation number indicating the order and precise +// position of a write that has occurred. +type SeqNo uint64 + +// MutationToken represents a particular mutation within the cluster. +type MutationToken struct { + VbID uint16 + VbUUID VbUUID + SeqNo SeqNo +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/crud_dura.go b/vendor/github.com/couchbase/gocbcore/v9/crud_dura.go new file mode 100644 index 000000000000..8f953ae1214c --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/crud_dura.go @@ -0,0 +1,50 @@ +package gocbcore + +import ( + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +// ObserveOptions encapsulates the parameters for a ObserveEx operation. +type ObserveOptions struct { + Key []byte + ReplicaIdx int + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// ObserveVbOptions encapsulates the parameters for a ObserveVbEx operation. +type ObserveVbOptions struct { + VbID uint16 + VbUUID VbUUID + ReplicaIdx int + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// ObserveResult encapsulates the result of a ObserveEx operation. 
+type ObserveResult struct { + KeyState memd.KeyState + Cas Cas +} + +// ObserveVbResult encapsulates the result of a ObserveVbEx operation. +type ObserveVbResult struct { + DidFailover bool + VbID uint16 + VbUUID VbUUID + PersistSeqNo SeqNo + CurrentSeqNo SeqNo + OldVbUUID VbUUID + LastSeqNo SeqNo +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/crud_options.go b/vendor/github.com/couchbase/gocbcore/v9/crud_options.go new file mode 100644 index 000000000000..f26099b5c540 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/crud_options.go @@ -0,0 +1,300 @@ +package gocbcore + +import ( + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +// GetOptions encapsulates the parameters for a GetEx operation. +type GetOptions struct { + Key []byte + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// GetAndTouchOptions encapsulates the parameters for a GetAndTouchEx operation. +type GetAndTouchOptions struct { + Key []byte + Expiry uint32 + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// GetAndLockOptions encapsulates the parameters for a GetAndLockEx operation. +type GetAndLockOptions struct { + Key []byte + LockTime uint32 + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// GetAnyReplicaOptions encapsulates the parameters for a GetAnyReplicaEx operation. +type GetAnyReplicaOptions struct { + Key []byte + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. 
+ TraceContext RequestSpanContext +} + +// GetOneReplicaOptions encapsulates the parameters for a GetOneReplicaEx operation. +type GetOneReplicaOptions struct { + Key []byte + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + ReplicaIdx int + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// TouchOptions encapsulates the parameters for a TouchEx operation. +type TouchOptions struct { + Key []byte + Expiry uint32 + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// UnlockOptions encapsulates the parameters for a UnlockEx operation. +type UnlockOptions struct { + Key []byte + Cas Cas + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// DeleteOptions encapsulates the parameters for a DeleteEx operation. +type DeleteOptions struct { + Key []byte + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + Cas Cas + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout time.Duration + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// AddOptions encapsulates the parameters for a AddEx operation. +type AddOptions struct { + Key []byte + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + Value []byte + Flags uint32 + Datatype uint8 + Expiry uint32 + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout time.Duration + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. 
+ TraceContext RequestSpanContext +} + +type storeOptions struct { + Key []byte + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + Value []byte + Flags uint32 + Datatype uint8 + Cas Cas + Expiry uint32 + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout time.Duration + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// SetOptions encapsulates the parameters for a SetEx operation. +type SetOptions struct { + Key []byte + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + Value []byte + Flags uint32 + Datatype uint8 + Expiry uint32 + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout time.Duration + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// ReplaceOptions encapsulates the parameters for a ReplaceEx operation. +type ReplaceOptions struct { + Key []byte + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + Value []byte + Flags uint32 + Datatype uint8 + Cas Cas + Expiry uint32 + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout time.Duration + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// AdjoinOptions encapsulates the parameters for a AppendEx or PrependEx operation. +type AdjoinOptions struct { + Key []byte + Value []byte + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + Cas Cas + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout time.Duration + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// CounterOptions encapsulates the parameters for a IncrementEx or DecrementEx operation. 
+type CounterOptions struct { + Key []byte + Delta uint64 + Initial uint64 + Expiry uint32 + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + Cas Cas + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout time.Duration + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// GetRandomOptions encapsulates the parameters for a GetRandomEx operation. +type GetRandomOptions struct { + RetryStrategy RetryStrategy + Deadline time.Time + + CollectionName string + ScopeName string + CollectionID uint32 + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// GetMetaOptions encapsulates the parameters for a GetMetaEx operation. +type GetMetaOptions struct { + Key []byte + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// SetMetaOptions encapsulates the parameters for a SetMetaEx operation. +type SetMetaOptions struct { + Key []byte + Value []byte + Extra []byte + Datatype uint8 + Options uint32 + Flags uint32 + Expiry uint32 + Cas Cas + RevNo uint64 + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// DeleteMetaOptions encapsulates the parameters for a DeleteMetaEx operation. +type DeleteMetaOptions struct { + Key []byte + Value []byte + Extra []byte + Datatype uint8 + Options uint32 + Flags uint32 + Expiry uint32 + Cas Cas + RevNo uint64 + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. 
+ TraceContext RequestSpanContext +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/crud_results.go b/vendor/github.com/couchbase/gocbcore/v9/crud_results.go new file mode 100644 index 000000000000..4cf8ca3fb5de --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/crud_results.go @@ -0,0 +1,102 @@ +package gocbcore + +// GetResult encapsulates the result of a GetEx operation. +type GetResult struct { + Value []byte + Flags uint32 + Datatype uint8 + Cas Cas +} + +// GetAndTouchResult encapsulates the result of a GetAndTouchEx operation. +type GetAndTouchResult struct { + Value []byte + Flags uint32 + Datatype uint8 + Cas Cas +} + +// GetAndLockResult encapsulates the result of a GetAndLockEx operation. +type GetAndLockResult struct { + Value []byte + Flags uint32 + Datatype uint8 + Cas Cas +} + +// GetReplicaResult encapsulates the result of a GetReplica operation. +type GetReplicaResult struct { + Value []byte + Flags uint32 + Datatype uint8 + Cas Cas +} + +// TouchResult encapsulates the result of a TouchEx operation. +type TouchResult struct { + Cas Cas + MutationToken MutationToken +} + +// UnlockResult encapsulates the result of a UnlockEx operation. +type UnlockResult struct { + Cas Cas + MutationToken MutationToken +} + +// DeleteResult encapsulates the result of a DeleteEx operation. +type DeleteResult struct { + Cas Cas + MutationToken MutationToken +} + +// StoreResult encapsulates the result of a AddEx, SetEx or ReplaceEx operation. +type StoreResult struct { + Cas Cas + MutationToken MutationToken +} + +// AdjoinResult encapsulates the result of a AppendEx or PrependEx operation. +type AdjoinResult struct { + Cas Cas + MutationToken MutationToken +} + +// CounterResult encapsulates the result of a IncrementEx or DecrementEx operation. +type CounterResult struct { + Value uint64 + Cas Cas + MutationToken MutationToken +} + +// GetRandomResult encapsulates the result of a GetRandomEx operation. 
+type GetRandomResult struct { + Key []byte + Value []byte + Flags uint32 + Datatype uint8 + Cas Cas +} + +// GetMetaResult encapsulates the result of a GetMetaEx operation. +type GetMetaResult struct { + Value []byte + Flags uint32 + Cas Cas + Expiry uint32 + SeqNo SeqNo + Datatype uint8 + Deleted uint32 +} + +// SetMetaResult encapsulates the result of a SetMetaEx operation. +type SetMetaResult struct { + Cas Cas + MutationToken MutationToken +} + +// DeleteMetaResult encapsulates the result of a DeleteMetaEx operation. +type DeleteMetaResult struct { + Cas Cas + MutationToken MutationToken +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/crud_subdoc.go b/vendor/github.com/couchbase/gocbcore/v9/crud_subdoc.go new file mode 100644 index 000000000000..f35c50bb90ac --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/crud_subdoc.go @@ -0,0 +1,138 @@ +package gocbcore + +import ( + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +// GetInOptions encapsulates the parameters for a GetInEx operation. +type GetInOptions struct { + Key []byte + Path string + Flags memd.SubdocFlag + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// ExistsInOptions encapsulates the parameters for a ExistsInEx operation. +type ExistsInOptions struct { + Key []byte + Path string + Flags memd.SubdocFlag + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// StoreInOptions encapsulates the parameters for a SetInEx, AddInEx, ReplaceInEx, +// PushFrontInEx, PushBackInEx, ArrayInsertInEx or AddUniqueInEx operation. 
+type StoreInOptions struct { + Key []byte + Path string + Value []byte + Flags memd.SubdocFlag + Cas Cas + Expiry uint32 + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout uint16 + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// CounterInOptions encapsulates the parameters for a CounterInEx operation. +type CounterInOptions StoreInOptions + +// DeleteInOptions encapsulates the parameters for a DeleteInEx operation. +type DeleteInOptions struct { + Key []byte + Path string + Cas Cas + Expiry uint32 + Flags memd.SubdocFlag + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout uint16 + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// LookupInOptions encapsulates the parameters for a LookupInEx operation. +type LookupInOptions struct { + Key []byte + Flags memd.SubdocDocFlag + Ops []SubDocOp + CollectionName string + ScopeName string + CollectionID uint32 + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// MutateInOptions encapsulates the parameters for a MutateInEx operation. +type MutateInOptions struct { + Key []byte + Flags memd.SubdocDocFlag + Cas Cas + Expiry uint32 + Ops []SubDocOp + CollectionName string + ScopeName string + RetryStrategy RetryStrategy + DurabilityLevel memd.DurabilityLevel + DurabilityLevelTimeout time.Duration + CollectionID uint32 + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// SubDocResult encapsulates the results from a single sub-document operation. 
+type SubDocResult struct { + Err error + Value []byte +} + +// LookupInResult encapsulates the result of a LookupInEx operation. +type LookupInResult struct { + Cas Cas + Ops []SubDocResult + + // Internal: This should never be used and is not supported. + Internal struct { + IsDeleted bool + } +} + +// MutateInResult encapsulates the result of a MutateInEx operation. +type MutateInResult struct { + Cas Cas + MutationToken MutationToken + Ops []SubDocResult +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/crudcomponent.go b/vendor/github.com/couchbase/gocbcore/v9/crudcomponent.go new file mode 100644 index 000000000000..0d0e236cd561 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/crudcomponent.go @@ -0,0 +1,1255 @@ +package gocbcore + +import ( + "encoding/binary" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type crudComponent struct { + cidMgr *collectionsComponent + defaultRetryStrategy RetryStrategy + tracer *tracerComponent + errMapManager *errMapComponent + featureVerifier kvFeatureVerifier +} + +func newCRUDComponent(cidMgr *collectionsComponent, defaultRetryStrategy RetryStrategy, tracerCmpt *tracerComponent, + errMapManager *errMapComponent, featureVerifier kvFeatureVerifier) *crudComponent { + return &crudComponent{ + cidMgr: cidMgr, + defaultRetryStrategy: defaultRetryStrategy, + tracer: tracerCmpt, + errMapManager: errMapManager, + featureVerifier: featureVerifier, + } +} + +func (crud *crudComponent) Get(opts GetOptions, cb GetCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("Get", opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + if len(resp.Extras) != 4 { + tracer.Finish() + cb(nil, errProtocol) + return + } + + res := GetResult{} + res.Value = resp.Value + res.Flags = binary.BigEndian.Uint32(resp.Extras[0:]) + res.Cas = Cas(resp.Cas) + res.Datatype = resp.Datatype + + tracer.Finish() + 
cb(&res, nil) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdGet, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: opts.Key, + Value: nil, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "Get", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) GetAndTouch(opts GetAndTouchOptions, cb GetAndTouchCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("GetAndTouch", opts.TraceContext) + + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + if len(resp.Extras) != 4 { + tracer.Finish() + cb(nil, errProtocol) + return + } + + flags := binary.BigEndian.Uint32(resp.Extras[0:]) + + tracer.Finish() + cb(&GetAndTouchResult{ + Value: resp.Value, + Flags: flags, + Cas: Cas(resp.Cas), + Datatype: resp.Datatype, + }, nil) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + extraBuf := make([]byte, 4) + binary.BigEndian.PutUint32(extraBuf[0:], opts.Expiry) + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: 
memd.CmdMagicReq, + Command: memd.CmdGAT, + Datatype: 0, + Cas: 0, + Extras: extraBuf, + Key: opts.Key, + Value: nil, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "GetAndTouch", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) GetAndLock(opts GetAndLockOptions, cb GetAndLockCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("GetAndLock", opts.TraceContext) + + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + if len(resp.Extras) != 4 { + tracer.Finish() + cb(nil, errProtocol) + return + } + + flags := binary.BigEndian.Uint32(resp.Extras[0:]) + + tracer.Finish() + cb(&GetAndLockResult{ + Value: resp.Value, + Flags: flags, + Cas: Cas(resp.Cas), + Datatype: resp.Datatype, + }, nil) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + extraBuf := make([]byte, 4) + binary.BigEndian.PutUint32(extraBuf[0:], opts.LockTime) + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdGetLocked, + Datatype: 0, + Cas: 0, + Extras: extraBuf, + Key: opts.Key, + Value: nil, + CollectionID: opts.CollectionID, + }, + 
Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "GetAndLock", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) GetOneReplica(opts GetOneReplicaOptions, cb GetReplicaCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("GetOneReplica", opts.TraceContext) + + if opts.ReplicaIdx <= 0 { + tracer.Finish() + return nil, errInvalidReplica + } + + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + cb(nil, err) + return + } + + if len(resp.Extras) != 4 { + cb(nil, errProtocol) + return + } + + flags := binary.BigEndian.Uint32(resp.Extras[0:]) + + cb(&GetReplicaResult{ + Value: resp.Value, + Flags: flags, + Cas: Cas(resp.Cas), + Datatype: resp.Datatype, + }, nil) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdGetReplica, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: opts.Key, + Value: nil, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + ReplicaIdx: opts.ReplicaIdx, + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := 
crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "GetOneReplica", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) Touch(opts TouchOptions, cb TouchCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("Touch", opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&TouchResult{ + Cas: Cas(resp.Cas), + MutationToken: mutToken, + }, nil) + } + + extraBuf := make([]byte, 4) + binary.BigEndian.PutUint32(extraBuf[0:], opts.Expiry) + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdTouch, + Datatype: 0, + Cas: 0, + Extras: extraBuf, + Key: opts.Key, + Value: nil, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + 
req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "Touch", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) Unlock(opts UnlockOptions, cb UnlockCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("Unlock", opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&UnlockResult{ + Cas: Cas(resp.Cas), + MutationToken: mutToken, + }, nil) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdUnlockKey, + Datatype: 0, + Cas: uint64(opts.Cas), + Extras: nil, + Key: opts.Key, + Value: nil, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + 
OperationID: "Unlock", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) Delete(opts DeleteOptions, cb DeleteCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("Delete", opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&DeleteResult{ + Cas: Cas(resp.Cas), + MutationToken: mutToken, + }, nil) + } + + var duraLevelFrame *memd.DurabilityLevelFrame + var duraTimeoutFrame *memd.DurabilityTimeoutFrame + if opts.DurabilityLevel > 0 { + if crud.featureVerifier.HasDurabilityLevelStatus(durabilityLevelStatusUnsupported) { + return nil, errFeatureNotAvailable + } + duraLevelFrame = &memd.DurabilityLevelFrame{ + DurabilityLevel: opts.DurabilityLevel, + } + duraTimeoutFrame = &memd.DurabilityTimeoutFrame{ + DurabilityTimeout: opts.DurabilityLevelTimeout, + } + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdDelete, + Datatype: 0, + Cas: uint64(opts.Cas), + Extras: nil, + Key: opts.Key, + Value: nil, + DurabilityLevelFrame: duraLevelFrame, + DurabilityTimeoutFrame: duraTimeoutFrame, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, 
err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "Delete", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) store(opName string, opcode memd.CmdCode, opts storeOptions, cb StoreCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace(opName, opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&StoreResult{ + Cas: Cas(resp.Cas), + MutationToken: mutToken, + }, nil) + } + + var duraLevelFrame *memd.DurabilityLevelFrame + var duraTimeoutFrame *memd.DurabilityTimeoutFrame + if opts.DurabilityLevel > 0 { + if crud.featureVerifier.HasDurabilityLevelStatus(durabilityLevelStatusUnsupported) { + return nil, errFeatureNotAvailable + } + duraLevelFrame = &memd.DurabilityLevelFrame{ + DurabilityLevel: opts.DurabilityLevel, + } + duraTimeoutFrame = &memd.DurabilityTimeoutFrame{ + DurabilityTimeout: opts.DurabilityLevelTimeout, + } + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + extraBuf := make([]byte, 8) + binary.BigEndian.PutUint32(extraBuf[0:], opts.Flags) + 
binary.BigEndian.PutUint32(extraBuf[4:], opts.Expiry) + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: opcode, + Datatype: opts.Datatype, + Cas: uint64(opts.Cas), + Extras: extraBuf, + Key: opts.Key, + Value: opts.Value, + DurabilityLevelFrame: duraLevelFrame, + DurabilityTimeoutFrame: duraTimeoutFrame, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: opName, + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) Set(opts SetOptions, cb StoreCallback) (PendingOp, error) { + return crud.store("Set", memd.CmdSet, storeOptions{ + Key: opts.Key, + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + Value: opts.Value, + Flags: opts.Flags, + Datatype: opts.Datatype, + Cas: 0, + Expiry: opts.Expiry, + TraceContext: opts.TraceContext, + DurabilityLevel: opts.DurabilityLevel, + DurabilityLevelTimeout: opts.DurabilityLevelTimeout, + CollectionID: opts.CollectionID, + Deadline: opts.Deadline, + }, cb) +} + +func (crud *crudComponent) Add(opts AddOptions, cb StoreCallback) (PendingOp, error) { + return crud.store("Add", memd.CmdAdd, storeOptions{ + Key: opts.Key, + CollectionName: opts.CollectionName, + 
ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + Value: opts.Value, + Flags: opts.Flags, + Datatype: opts.Datatype, + Cas: 0, + Expiry: opts.Expiry, + TraceContext: opts.TraceContext, + DurabilityLevel: opts.DurabilityLevel, + DurabilityLevelTimeout: opts.DurabilityLevelTimeout, + CollectionID: opts.CollectionID, + Deadline: opts.Deadline, + }, cb) +} + +func (crud *crudComponent) Replace(opts ReplaceOptions, cb StoreCallback) (PendingOp, error) { + return crud.store("Replace", memd.CmdReplace, storeOptions(opts), cb) +} + +func (crud *crudComponent) adjoin(opName string, opcode memd.CmdCode, opts AdjoinOptions, cb AdjoinCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace(opName, opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&AdjoinResult{ + Cas: Cas(resp.Cas), + MutationToken: mutToken, + }, nil) + } + + var duraLevelFrame *memd.DurabilityLevelFrame + var duraTimeoutFrame *memd.DurabilityTimeoutFrame + if opts.DurabilityLevel > 0 { + if crud.featureVerifier.HasDurabilityLevelStatus(durabilityLevelStatusUnsupported) { + return nil, errFeatureNotAvailable + } + duraLevelFrame = &memd.DurabilityLevelFrame{ + DurabilityLevel: opts.DurabilityLevel, + } + duraTimeoutFrame = &memd.DurabilityTimeoutFrame{ + DurabilityTimeout: opts.DurabilityLevelTimeout, + } + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: opcode, + Datatype: 0, + Cas: uint64(opts.Cas), + Extras: nil, + Key: opts.Key, + Value: opts.Value, + DurabilityLevelFrame: duraLevelFrame, + 
DurabilityTimeoutFrame: duraTimeoutFrame, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: opName, + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) Append(opts AdjoinOptions, cb AdjoinCallback) (PendingOp, error) { + return crud.adjoin("Append", memd.CmdAppend, opts, cb) +} + +func (crud *crudComponent) Prepend(opts AdjoinOptions, cb AdjoinCallback) (PendingOp, error) { + return crud.adjoin("Prepend", memd.CmdPrepend, opts, cb) +} + +func (crud *crudComponent) counter(opName string, opcode memd.CmdCode, opts CounterOptions, cb CounterCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace(opName, opts.TraceContext) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + if len(resp.Value) != 8 { + tracer.Finish() + cb(nil, errProtocol) + return + } + intVal := binary.BigEndian.Uint64(resp.Value) + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&CounterResult{ + Value: intVal, + Cas: 
Cas(resp.Cas), + MutationToken: mutToken, + }, nil) + } + + // You cannot have an expiry when you do not want to create the document. + if opts.Initial == uint64(0xFFFFFFFFFFFFFFFF) && opts.Expiry != 0 { + return nil, errInvalidArgument + } + + var duraLevelFrame *memd.DurabilityLevelFrame + var duraTimeoutFrame *memd.DurabilityTimeoutFrame + if opts.DurabilityLevel > 0 { + if crud.featureVerifier.HasDurabilityLevelStatus(durabilityLevelStatusUnsupported) { + return nil, errFeatureNotAvailable + } + duraLevelFrame = &memd.DurabilityLevelFrame{ + DurabilityLevel: opts.DurabilityLevel, + } + duraTimeoutFrame = &memd.DurabilityTimeoutFrame{ + DurabilityTimeout: opts.DurabilityLevelTimeout, + } + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + extraBuf := make([]byte, 20) + binary.BigEndian.PutUint64(extraBuf[0:], opts.Delta) + if opts.Initial != uint64(0xFFFFFFFFFFFFFFFF) { + binary.BigEndian.PutUint64(extraBuf[8:], opts.Initial) + binary.BigEndian.PutUint32(extraBuf[16:], opts.Expiry) + } else { + binary.BigEndian.PutUint64(extraBuf[8:], 0x0000000000000000) + binary.BigEndian.PutUint32(extraBuf[16:], 0xFFFFFFFF) + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: opcode, + Datatype: 0, + Cas: uint64(opts.Cas), + Extras: extraBuf, + Key: opts.Key, + Value: nil, + DurabilityLevelFrame: duraLevelFrame, + DurabilityTimeoutFrame: duraTimeoutFrame, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: 
errAmbiguousTimeout, + OperationID: opName, + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) Increment(opts CounterOptions, cb CounterCallback) (PendingOp, error) { + return crud.counter("Increment", memd.CmdIncrement, opts, cb) +} + +func (crud *crudComponent) Decrement(opts CounterOptions, cb CounterCallback) (PendingOp, error) { + return crud.counter("Decrement", memd.CmdDecrement, opts, cb) +} + +func (crud *crudComponent) GetRandom(opts GetRandomOptions, cb GetRandomCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("GetRandom", opts.TraceContext) + + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + if len(resp.Extras) != 4 { + tracer.Finish() + cb(nil, errProtocol) + return + } + + flags := binary.BigEndian.Uint32(resp.Extras[0:]) + + tracer.Finish() + cb(&GetRandomResult{ + Key: resp.Key, + Value: resp.Value, + Flags: flags, + Cas: Cas(resp.Cas), + Datatype: resp.Datatype, + }, nil) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdGetRandom, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: nil, + Value: nil, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + RetryStrategy: opts.RetryStrategy, + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + 
count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "GetRandom", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) GetMeta(opts GetMetaOptions, cb GetMetaCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("GetMeta", nil) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + if len(resp.Extras) != 21 { + tracer.Finish() + cb(nil, errProtocol) + return + } + + deleted := binary.BigEndian.Uint32(resp.Extras[0:]) + flags := binary.BigEndian.Uint32(resp.Extras[4:]) + expTime := binary.BigEndian.Uint32(resp.Extras[8:]) + seqNo := SeqNo(binary.BigEndian.Uint64(resp.Extras[12:])) + dataType := resp.Extras[20] + + tracer.Finish() + cb(&GetMetaResult{ + Value: resp.Value, + Flags: flags, + Cas: Cas(resp.Cas), + Expiry: expTime, + SeqNo: seqNo, + Datatype: dataType, + Deleted: deleted, + }, nil) + } + + extraBuf := make([]byte, 1) + extraBuf[0] = 2 + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdGetMeta, + Datatype: 0, + Cas: 0, + Extras: extraBuf, + Key: opts.Key, + Value: nil, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := 
req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "GetMeta", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) SetMeta(opts SetMetaOptions, cb SetMetaCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("SetMeta", nil) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&SetMetaResult{ + Cas: Cas(resp.Cas), + MutationToken: mutToken, + }, nil) + } + + extraBuf := make([]byte, 30+len(opts.Extra)) + binary.BigEndian.PutUint32(extraBuf[0:], opts.Flags) + binary.BigEndian.PutUint32(extraBuf[4:], opts.Expiry) + binary.BigEndian.PutUint64(extraBuf[8:], opts.RevNo) + binary.BigEndian.PutUint64(extraBuf[16:], uint64(opts.Cas)) + binary.BigEndian.PutUint32(extraBuf[24:], opts.Options) + binary.BigEndian.PutUint16(extraBuf[28:], uint16(len(opts.Extra))) + copy(extraBuf[30:], opts.Extra) + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdSetMeta, + Datatype: opts.Datatype, + Cas: 0, + Extras: extraBuf, + Key: opts.Key, + Value: opts.Value, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: 
opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "SetMeta", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) DeleteMeta(opts DeleteMetaOptions, cb DeleteMetaCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("DeleteMeta", nil) + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&DeleteMetaResult{ + Cas: Cas(resp.Cas), + MutationToken: mutToken, + }, nil) + } + + extraBuf := make([]byte, 30+len(opts.Extra)) + binary.BigEndian.PutUint32(extraBuf[0:], opts.Flags) + binary.BigEndian.PutUint32(extraBuf[4:], opts.Expiry) + binary.BigEndian.PutUint64(extraBuf[8:], opts.RevNo) + binary.BigEndian.PutUint64(extraBuf[16:], uint64(opts.Cas)) + binary.BigEndian.PutUint32(extraBuf[24:], opts.Options) + binary.BigEndian.PutUint16(extraBuf[28:], uint16(len(opts.Extra))) + copy(extraBuf[30:], opts.Extra) + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdDelMeta, + Datatype: opts.Datatype, + Cas: 0, + 
Extras: extraBuf, + Key: opts.Key, + Value: opts.Value, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "DeleteMeta", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/crudcomponent_subdoc.go b/vendor/github.com/couchbase/gocbcore/v9/crudcomponent_subdoc.go new file mode 100644 index 000000000000..397d8e27da25 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/crudcomponent_subdoc.go @@ -0,0 +1,361 @@ +package gocbcore + +import ( + "encoding/binary" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type subdocOpList struct { + ops []SubDocOp + indexes []int +} + +func (sol *subdocOpList) Reorder(ops []SubDocOp) { + var xAttrOps []SubDocOp + var xAttrIndexes []int + var sops []SubDocOp + var opIndexes []int + for i, op := range ops { + if op.Flags&memd.SubdocFlagXattrPath != 0 { + xAttrOps = append(xAttrOps, op) + xAttrIndexes = append(xAttrIndexes, i) + } else { + sops = append(sops, op) + opIndexes = append(opIndexes, i) + } + } + + sol.ops = append(xAttrOps, sops...) + sol.indexes = append(xAttrIndexes, opIndexes...) 
+} +func (crud *crudComponent) LookupIn(opts LookupInOptions, cb LookupInCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("LookupIn", opts.TraceContext) + + results := make([]SubDocResult, len(opts.Ops)) + var subdocs subdocOpList + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil && + !isErrorStatus(err, memd.StatusSubDocMultiPathFailureDeleted) && + !isErrorStatus(err, memd.StatusSubDocSuccessDeleted) && + !isErrorStatus(err, memd.StatusSubDocBadMulti) { + tracer.Finish() + cb(nil, err) + return + } + + respIter := 0 + for i := range results { + if respIter+6 > len(resp.Value) { + tracer.Finish() + cb(nil, errProtocol) + return + } + + resError := memd.StatusCode(binary.BigEndian.Uint16(resp.Value[respIter+0:])) + resValueLen := int(binary.BigEndian.Uint32(resp.Value[respIter+2:])) + + if respIter+6+resValueLen > len(resp.Value) { + tracer.Finish() + cb(nil, errProtocol) + return + } + + if resError != memd.StatusSuccess { + results[subdocs.indexes[i]].Err = crud.makeSubDocError(i, resError, req, resp) + } + + results[subdocs.indexes[i]].Value = resp.Value[respIter+6 : respIter+6+resValueLen] + respIter += 6 + resValueLen + } + + tracer.Finish() + cb(&LookupInResult{ + Cas: Cas(resp.Cas), + Ops: results, + Internal: struct{ IsDeleted bool }{ + IsDeleted: isErrorStatus(err, memd.StatusSubDocSuccessDeleted) || + isErrorStatus(err, memd.StatusSubDocMultiPathFailureDeleted), + }, + }, nil) + } + + subdocs.Reorder(opts.Ops) + + pathBytesList := make([][]byte, len(opts.Ops)) + pathBytesTotal := 0 + for i, op := range subdocs.ops { + pathBytes := []byte(op.Path) + pathBytesList[i] = pathBytes + pathBytesTotal += len(pathBytes) + } + + valueBuf := make([]byte, len(opts.Ops)*4+pathBytesTotal) + + valueIter := 0 + for i, op := range subdocs.ops { + if op.Op != memd.SubDocOpGet && op.Op != memd.SubDocOpExists && + op.Op != memd.SubDocOpGetDoc && op.Op != memd.SubDocOpGetCount { + return nil, errInvalidArgument + } + 
if op.Value != nil { + return nil, errInvalidArgument + } + + pathBytes := pathBytesList[i] + pathBytesLen := len(pathBytes) + + valueBuf[valueIter+0] = uint8(op.Op) + valueBuf[valueIter+1] = uint8(op.Flags) + binary.BigEndian.PutUint16(valueBuf[valueIter+2:], uint16(pathBytesLen)) + copy(valueBuf[valueIter+4:], pathBytes) + valueIter += 4 + pathBytesLen + } + + var extraBuf []byte + if opts.Flags != 0 { + extraBuf = append(extraBuf, uint8(opts.Flags)) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdSubDocMultiLookup, + Datatype: 0, + Cas: 0, + Extras: extraBuf, + Key: opts.Key, + Value: valueBuf, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "LookupIn", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) MutateIn(opts MutateInOptions, cb MutateInCallback) (PendingOp, error) { + tracer := crud.tracer.CreateOpTrace("MutateIn", opts.TraceContext) + + results := make([]SubDocResult, len(opts.Ops)) + var subdocs subdocOpList + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + if err != nil && + !isErrorStatus(err, 
memd.StatusSubDocSuccessDeleted) && + !isErrorStatus(err, memd.StatusSubDocBadMulti) { + tracer.Finish() + cb(nil, err) + return + } + + if isErrorStatus(err, memd.StatusSubDocBadMulti) { + if len(resp.Value) != 3 { + tracer.Finish() + cb(nil, errProtocol) + return + } + + opIndex := int(resp.Value[0]) + resError := memd.StatusCode(binary.BigEndian.Uint16(resp.Value[1:])) + + err := crud.makeSubDocError(opIndex, resError, req, resp) + tracer.Finish() + cb(nil, err) + return + } + + for readPos := uint32(0); readPos < uint32(len(resp.Value)); { + opIndex := int(resp.Value[readPos+0]) + opStatus := memd.StatusCode(binary.BigEndian.Uint16(resp.Value[readPos+1:])) + + results[subdocs.indexes[opIndex]].Err = crud.makeSubDocError(opIndex, opStatus, req, resp) + readPos += 3 + + if opStatus == memd.StatusSuccess { + valLength := binary.BigEndian.Uint32(resp.Value[readPos:]) + results[subdocs.indexes[opIndex]].Value = resp.Value[readPos+4 : readPos+4+valLength] + readPos += 4 + valLength + } + } + + mutToken := MutationToken{} + if len(resp.Extras) >= 16 { + mutToken.VbID = req.Vbucket + mutToken.VbUUID = VbUUID(binary.BigEndian.Uint64(resp.Extras[0:])) + mutToken.SeqNo = SeqNo(binary.BigEndian.Uint64(resp.Extras[8:])) + } + + tracer.Finish() + cb(&MutateInResult{ + Cas: Cas(resp.Cas), + MutationToken: mutToken, + Ops: results, + }, nil) + } + + var duraLevelFrame *memd.DurabilityLevelFrame + var duraTimeoutFrame *memd.DurabilityTimeoutFrame + if opts.DurabilityLevel > 0 { + if crud.featureVerifier.HasDurabilityLevelStatus(durabilityLevelStatusUnsupported) { + return nil, errFeatureNotAvailable + } + duraLevelFrame = &memd.DurabilityLevelFrame{ + DurabilityLevel: opts.DurabilityLevel, + } + duraTimeoutFrame = &memd.DurabilityTimeoutFrame{ + DurabilityTimeout: opts.DurabilityLevelTimeout, + } + } + + if opts.Flags&memd.SubdocDocFlagCreateAsDeleted != 0 { + // We can get here before support status is actually known, we'll send the request unless we know for a fact + // that 
this is unsupported. + if crud.featureVerifier.HasCreateAsDeletedStatus(createAsDeletedStatusUnsupported) { + return nil, errFeatureNotAvailable + } + } + + subdocs.Reorder(opts.Ops) + + pathBytesList := make([][]byte, len(opts.Ops)) + pathBytesTotal := 0 + valueBytesTotal := 0 + for i, op := range subdocs.ops { + pathBytes := []byte(op.Path) + pathBytesList[i] = pathBytes + pathBytesTotal += len(pathBytes) + valueBytesTotal += len(op.Value) + } + + valueBuf := make([]byte, len(opts.Ops)*8+pathBytesTotal+valueBytesTotal) + + valueIter := 0 + for i, op := range subdocs.ops { + if op.Op != memd.SubDocOpDictAdd && op.Op != memd.SubDocOpDictSet && + op.Op != memd.SubDocOpDelete && op.Op != memd.SubDocOpReplace && + op.Op != memd.SubDocOpArrayPushLast && op.Op != memd.SubDocOpArrayPushFirst && + op.Op != memd.SubDocOpArrayInsert && op.Op != memd.SubDocOpArrayAddUnique && + op.Op != memd.SubDocOpCounter && op.Op != memd.SubDocOpSetDoc && + op.Op != memd.SubDocOpAddDoc && op.Op != memd.SubDocOpDeleteDoc { + return nil, errInvalidArgument + } + + pathBytes := pathBytesList[i] + pathBytesLen := len(pathBytes) + valueBytesLen := len(op.Value) + + valueBuf[valueIter+0] = uint8(op.Op) + valueBuf[valueIter+1] = uint8(op.Flags) + binary.BigEndian.PutUint16(valueBuf[valueIter+2:], uint16(pathBytesLen)) + binary.BigEndian.PutUint32(valueBuf[valueIter+4:], uint32(valueBytesLen)) + copy(valueBuf[valueIter+8:], pathBytes) + copy(valueBuf[valueIter+8+pathBytesLen:], op.Value) + valueIter += 8 + pathBytesLen + valueBytesLen + } + + var extraBuf []byte + if opts.Expiry != 0 { + tmpBuf := make([]byte, 4) + binary.BigEndian.PutUint32(tmpBuf[0:], opts.Expiry) + extraBuf = append(extraBuf, tmpBuf...) 
+ } + if opts.Flags != 0 { + extraBuf = append(extraBuf, uint8(opts.Flags)) + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = crud.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdSubDocMultiMutation, + Datatype: 0, + Cas: uint64(opts.Cas), + Extras: extraBuf, + Key: opts.Key, + Value: valueBuf, + DurabilityLevelFrame: duraLevelFrame, + DurabilityTimeoutFrame: duraTimeoutFrame, + CollectionID: opts.CollectionID, + }, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := crud.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "MutateIn", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (crud *crudComponent) makeSubDocError(index int, code memd.StatusCode, req *memdQRequest, resp *memdQResponse) error { + err := getKvStatusCodeError(code) + err = translateMemdError(err, req) + err = crud.errMapManager.EnhanceKvError(err, resp, req) + + return SubDocumentError{ + Index: index, + InnerError: err, + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/dcp.go b/vendor/github.com/couchbase/gocbcore/v9/dcp.go new file mode 100644 index 000000000000..ddb22ed13d5b --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/dcp.go @@ -0,0 +1,105 @@ +package gocbcore + +// OpenStreamFilterOptions are the filtering options 
available to the OpenStream operation. +type OpenStreamFilterOptions struct { + ScopeID uint32 + CollectionIDs []uint32 +} + +// OpenStreamStreamOptions are the stream options available to the OpenStream operation. +type OpenStreamStreamOptions struct { + StreamID uint16 +} + +// OpenStreamManifestOptions are the manifest options available to the OpenStream operation. +type OpenStreamManifestOptions struct { + ManifestUID uint64 +} + +// OpenStreamOptions are the options available to the OpenStream operation. +type OpenStreamOptions struct { + FilterOptions *OpenStreamFilterOptions + StreamOptions *OpenStreamStreamOptions + ManifestOptions *OpenStreamManifestOptions +} + +// GetVbucketSeqnoFilterOptions are the filter options available to the GetVbucketSeqno operation. +type GetVbucketSeqnoFilterOptions struct { + CollectionID uint32 +} + +// GetVbucketSeqnoOptions are the options available to the GetVbucketSeqno operation. +type GetVbucketSeqnoOptions struct { + FilterOptions *GetVbucketSeqnoFilterOptions +} + +// CloseStreamStreamOptions are the stream options available to the CloseStream operation. +type CloseStreamStreamOptions struct { + StreamID uint16 +} + +// CloseStreamOptions are the options available to the CloseStream operation. +type CloseStreamOptions struct { + StreamOptions *CloseStreamStreamOptions +} + +// SnapshotState represents the state of a particular cluster snapshot. +type SnapshotState uint32 + +// HasInMemory returns whether this snapshot is available in memory. +func (s SnapshotState) HasInMemory() bool { + return uint32(s)&1 != 0 +} + +// HasOnDisk returns whether this snapshot is available on disk. +func (s SnapshotState) HasOnDisk() bool { + return uint32(s)&2 != 0 +} + +// FailoverEntry represents a single entry in the server fail-over log. +type FailoverEntry struct { + VbUUID VbUUID + SeqNo SeqNo +} + +// StreamObserver provides an interface to receive events from a running DCP stream. 
+type StreamObserver interface { + SnapshotMarker(startSeqNo, endSeqNo uint64, vbID uint16, streamID uint16, snapshotType SnapshotState) + Mutation(seqNo, revNo uint64, flags, expiry, lockTime uint32, cas uint64, datatype uint8, vbID uint16, collectionID uint32, streamID uint16, key, value []byte) + Deletion(seqNo, revNo uint64, deleteTime uint32, cas uint64, datatype uint8, vbID uint16, collectionID uint32, streamID uint16, key, value []byte) + Expiration(seqNo, revNo uint64, deleteTime uint32, cas uint64, vbID uint16, collectionID uint32, streamID uint16, key []byte) + End(vbID uint16, streamID uint16, err error) + CreateCollection(seqNo uint64, version uint8, vbID uint16, manifestUID uint64, scopeID uint32, collectionID uint32, ttl uint32, streamID uint16, key []byte) + DeleteCollection(seqNo uint64, version uint8, vbID uint16, manifestUID uint64, scopeID uint32, collectionID uint32, streamID uint16) + FlushCollection(seqNo uint64, version uint8, vbID uint16, manifestUID uint64, collectionID uint32) + CreateScope(seqNo uint64, version uint8, vbID uint16, manifestUID uint64, scopeID uint32, streamID uint16, key []byte) + DeleteScope(seqNo uint64, version uint8, vbID uint16, manifestUID uint64, scopeID uint32, streamID uint16) + ModifyCollection(seqNo uint64, version uint8, vbID uint16, manifestUID uint64, collectionID uint32, ttl uint32, streamID uint16) + OSOSnapshot(vbID uint16, snapshotType uint32, streamID uint16) + SeqNoAdvanced(vbID uint16, bySeqno uint64, streamID uint16) +} + +type streamFilter struct { + ManifestUID string `json:"uid,omitempty"` + Collections []string `json:"collections,omitempty"` + Scope string `json:"scope,omitempty"` + StreamID uint16 `json:"sid,omitempty"` +} + +// OpenStreamCallback is invoked with the results of `OpenStream` operations. +type OpenStreamCallback func([]FailoverEntry, error) + +// CloseStreamCallback is invoked with the results of `CloseStream` operations. 
+type CloseStreamCallback func(error) + +// GetFailoverLogCallback is invoked with the results of `GetFailoverLog` operations. +type GetFailoverLogCallback func([]FailoverEntry, error) + +// VbSeqNoEntry represents a single GetVbucketSeqnos sequence number entry. +type VbSeqNoEntry struct { + VbID uint16 + SeqNo SeqNo +} + +// GetVBucketSeqnosCallback is invoked with the results of `GetVBucketSeqnos` operations. +type GetVBucketSeqnosCallback func([]VbSeqNoEntry, error) diff --git a/vendor/github.com/couchbase/gocbcore/v9/dcpagent.go b/vendor/github.com/couchbase/gocbcore/v9/dcpagent.go new file mode 100644 index 000000000000..5921cd7cbf02 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/dcpagent.go @@ -0,0 +1,372 @@ +package gocbcore + +import ( + "crypto/tls" + "fmt" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +// DCPAgent represents the base client handling DCP connections to a Couchbase Server. +type DCPAgent struct { + clientID string + bucketName string + tlsConfig *dynTLSConfig + initFn memdInitFunc + + pollerController *pollerController + kvMux *kvMux + httpMux *httpMux + + cfgManager *configManagementComponent + errMap *errMapComponent + tracer *tracerComponent + diagnostics *diagnosticsComponent + dcp *dcpComponent + http *httpComponent +} + +// CreateDcpAgent creates an agent for performing DCP operations. +func CreateDcpAgent(config *DCPAgentConfig, dcpStreamName string, openFlags memd.DcpOpenFlag) (*DCPAgent, error) { + // We wrap the authorization system to force DCP channel opening + // as part of the "initialization" for any servers. 
+ initFn := func(client *memdClient, deadline time.Time) error { + sclient := &syncClient{client: client} + if err := sclient.ExecOpenDcpConsumer(dcpStreamName, openFlags, deadline); err != nil { + return err + } + if err := sclient.ExecEnableDcpNoop(180*time.Second, deadline); err != nil { + return err + } + var priority string + switch config.AgentPriority { + case DcpAgentPriorityLow: + priority = "low" + case DcpAgentPriorityMed: + priority = "medium" + case DcpAgentPriorityHigh: + priority = "high" + } + if err := sclient.ExecDcpControl("set_priority", priority, deadline); err != nil { + return err + } + + if config.UseExpiryOpcode { + if err := sclient.ExecDcpControl("enable_expiry_opcode", "true", deadline); err != nil { + return err + } + } + + if config.UseStreamID { + if err := sclient.ExecDcpControl("enable_stream_id", "true", deadline); err != nil { + return err + } + } + + if config.UseOSOBackfill { + if err := sclient.ExecDcpControl("enable_out_of_order_snapshots", "true", deadline); err != nil { + return err + } + } + + // If the user doesn't explicitly set the backfill order, the DCP control flag will not be sent to the cluster + // and the default will implicitly be used (which is 'round-robin'). 
+ var backfillOrder string + switch config.BackfillOrder { + case DCPBackfillOrderRoundRobin: + backfillOrder = "round-robin" + case DCPBackfillOrderSequential: + backfillOrder = "sequential" + } + + if backfillOrder != "" { + if err := sclient.ExecDcpControl("backfill_order", backfillOrder, deadline); err != nil { + return err + } + } + + if err := sclient.ExecEnableDcpClientEnd(deadline); err != nil { + return err + } + return sclient.ExecEnableDcpBufferAck(8*1024*1024, deadline) + } + + return createDCPAgent(config, initFn) +} + +func createDCPAgent(config *DCPAgentConfig, initFn memdInitFunc) (*DCPAgent, error) { + logInfof("SDK Version: gocbcore/%s", goCbCoreVersionStr) + logInfof("Creating new dcp agent: %+v", config) + + var tlsConfig *dynTLSConfig + if config.UseTLS { + tlsConfig = &dynTLSConfig{ + BaseConfig: &tls.Config{ + GetClientCertificate: func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert, err := config.Auth.Certificate(AuthCertRequest{}) + if err != nil { + return nil, err + } + + if cert == nil { + return &tls.Certificate{}, nil + } + + return cert, nil + }, + }, + Provider: config.TLSRootCAProvider, + } + } + + httpCli := createHTTPClient(config.HTTPMaxIdleConns, config.HTTPMaxIdleConnsPerHost, + config.HTTPIdleConnectionTimeout, tlsConfig) + + tracerCmpt := newTracerComponent(noopTracer{}, config.BucketName, false) + + c := &DCPAgent{ + clientID: formatCbUID(randomCbUID()), + bucketName: config.BucketName, + tlsConfig: tlsConfig, + initFn: initFn, + tracer: tracerCmpt, + + errMap: newErrMapManager(config.BucketName), + } + + circuitBreakerConfig := CircuitBreakerConfig{ + Enabled: false, + } + auth := config.Auth + userAgent := config.UserAgent + disableDecompression := config.DisableDecompression + useCompression := config.UseCompression + useCollections := config.UseCollections + compressionMinSize := 32 + compressionMinRatio := 0.83 + + kvConnectTimeout := 7000 * time.Millisecond + if config.KVConnectTimeout > 0 { + 
kvConnectTimeout = config.KVConnectTimeout + } + + serverWaitTimeout := 5 * time.Second + + kvPoolSize := 1 + if config.KvPoolSize > 0 { + kvPoolSize = config.KvPoolSize + } + + maxQueueSize := 2048 + if config.MaxQueueSize > 0 { + maxQueueSize = config.MaxQueueSize + } + + confCccpMaxWait := 3 * time.Second + if config.CccpMaxWait > 0 { + confCccpMaxWait = config.CccpMaxWait + } + + confCccpPollPeriod := 2500 * time.Millisecond + if config.CccpPollPeriod > 0 { + confCccpPollPeriod = config.CccpPollPeriod + } + + confHTTPRetryDelay := 10 * time.Second + if config.HTTPRetryDelay > 0 { + confHTTPRetryDelay = config.HTTPRetryDelay + } + + confHTTPRedialPeriod := 10 * time.Second + if config.HTTPRedialPeriod > 0 { + confHTTPRedialPeriod = config.HTTPRedialPeriod + } + + if config.CompressionMinSize > 0 { + compressionMinSize = config.CompressionMinSize + } + if config.CompressionMinRatio > 0 { + compressionMinRatio = config.CompressionMinRatio + if compressionMinRatio >= 1.0 { + compressionMinRatio = 1.0 + } + } + authMechanisms := []AuthMechanism{ + ScramSha512AuthMechanism, + ScramSha256AuthMechanism, + ScramSha1AuthMechanism} + + // PLAIN authentication is only supported over TLS + if config.UseTLS { + authMechanisms = append(authMechanisms, PlainAuthMechanism) + } + + authHandler := buildAuthHandler(auth) + + var httpEpList []string + for _, hostPort := range config.HTTPAddrs { + if !c.IsSecure() { + httpEpList = append(httpEpList, fmt.Sprintf("http://%s", hostPort)) + } else { + httpEpList = append(httpEpList, fmt.Sprintf("https://%s", hostPort)) + } + } + + c.cfgManager = newConfigManager( + configManagerProperties{ + NetworkType: config.NetworkType, + UseSSL: config.UseTLS, + SrcMemdAddrs: config.MemdAddrs, + SrcHTTPAddrs: []string{}, + }, + ) + + dialer := newMemdClientDialerComponent( + memdClientDialerProps{ + ServerWaitTimeout: serverWaitTimeout, + KVConnectTimeout: kvConnectTimeout, + ClientID: c.clientID, + TLSConfig: c.tlsConfig, + CompressionMinSize: 
compressionMinSize, + CompressionMinRatio: compressionMinRatio, + DisableDecompression: disableDecompression, + }, + bootstrapProps{ + HelloProps: helloProps{ + CollectionsEnabled: useCollections, + CompressionEnabled: useCompression, + }, + Bucket: c.bucketName, + UserAgent: userAgent, + AuthMechanisms: authMechanisms, + AuthHandler: authHandler, + ErrMapManager: c.errMap, + }, + circuitBreakerConfig, + nil, + c.tracer, + initFn, + ) + c.kvMux = newKVMux( + kvMuxProps{ + QueueSize: maxQueueSize, + PoolSize: kvPoolSize, + CollectionsEnabled: useCollections, + }, + c.cfgManager, + c.errMap, + c.tracer, + dialer, + ) + c.httpMux = newHTTPMux(circuitBreakerConfig, c.cfgManager) + c.http = newHTTPComponent( + httpComponentProps{ + UserAgent: userAgent, + DefaultRetryStrategy: &failFastRetryStrategy{}, + }, + httpCli, + c.httpMux, + auth, + c.tracer, + ) + + c.pollerController = newPollerController( + newCCCPConfigController( + cccpPollerProperties{ + confCccpMaxWait: confCccpMaxWait, + confCccpPollPeriod: confCccpPollPeriod, + }, + c.kvMux, + c.cfgManager, + ), + newHTTPConfigController( + c.bucketName, + httpPollerProperties{ + httpComponent: c.http, + confHTTPRetryDelay: confHTTPRetryDelay, + confHTTPRedialPeriod: confHTTPRedialPeriod, + }, + c.httpMux, + c.cfgManager, + ), + c.cfgManager, + ) + + c.diagnostics = newDiagnosticsComponent(c.kvMux, nil, nil, c.bucketName, newFailFastRetryStrategy(), c.pollerController) + c.dcp = newDcpComponent(c.kvMux, config.UseStreamID) + + // Kick everything off. + cfg := &routeConfig{ + kvServerList: config.MemdAddrs, + mgmtEpList: httpEpList, + revID: -1, + } + + c.httpMux.OnNewRouteConfig(cfg) + c.kvMux.OnNewRouteConfig(cfg) + + go c.pollerController.Start() + + return c, nil +} + +// IsSecure returns whether this client is connected via SSL. 
+func (agent *DCPAgent) IsSecure() bool { + return agent.tlsConfig != nil +} + +// Close shuts down the agent, disconnecting from all servers and failing +// any outstanding operations with ErrShutdown. +func (agent *DCPAgent) Close() error { + routeCloseErr := agent.kvMux.Close() + agent.pollerController.Stop() + + // Wait for our external looper goroutines to finish, note that if the + // specific looper wasn't used, it will be a nil value otherwise it + // will be an open channel till its closed to signal completion. + <-agent.pollerController.Done() + + return routeCloseErr +} + +// WaitUntilReady returns whether or not the Agent has seen a valid cluster config. +func (agent *DCPAgent) WaitUntilReady(deadline time.Time, opts WaitUntilReadyOptions, + cb WaitUntilReadyCallback) (PendingOp, error) { + return agent.diagnostics.WaitUntilReady(deadline, opts, cb) +} + +// OpenStream opens a DCP stream for a particular VBucket and, optionally, filter. +func (agent *DCPAgent) OpenStream(vbID uint16, flags memd.DcpStreamAddFlag, vbUUID VbUUID, startSeqNo, + endSeqNo, snapStartSeqNo, snapEndSeqNo SeqNo, evtHandler StreamObserver, opts OpenStreamOptions, + cb OpenStreamCallback) (PendingOp, error) { + return agent.dcp.OpenStream(vbID, flags, vbUUID, startSeqNo, endSeqNo, snapStartSeqNo, snapEndSeqNo, evtHandler, opts, cb) +} + +// CloseStream shuts down an open stream for the specified VBucket. +func (agent *DCPAgent) CloseStream(vbID uint16, opts CloseStreamOptions, cb CloseStreamCallback) (PendingOp, error) { + return agent.dcp.CloseStream(vbID, opts, cb) +} + +// GetFailoverLog retrieves the fail-over log for a particular VBucket. This is used +// to resume an interrupted stream after a node fail-over has occurred. +func (agent *DCPAgent) GetFailoverLog(vbID uint16, cb GetFailoverLogCallback) (PendingOp, error) { + return agent.dcp.GetFailoverLog(vbID, cb) +} + +// GetVbucketSeqnos returns the last checkpoint for a particular VBucket. 
This is useful +// for starting a DCP stream from wherever the server currently is. +func (agent *DCPAgent) GetVbucketSeqnos(serverIdx int, state memd.VbucketState, opts GetVbucketSeqnoOptions, + cb GetVBucketSeqnosCallback) (PendingOp, error) { + return agent.dcp.GetVbucketSeqnos(serverIdx, state, opts, cb) +} + +// HasCollectionsSupport verifies whether or not collections are available on the agent. +func (agent *DCPAgent) HasCollectionsSupport() bool { + return agent.kvMux.SupportsCollections() +} + +// ConfigSnapshot returns a snapshot of the underlying configuration currently in use. +func (agent *DCPAgent) ConfigSnapshot() (*ConfigSnapshot, error) { + return agent.kvMux.ConfigSnapshot() +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/dcpagent_config.go b/vendor/github.com/couchbase/gocbcore/v9/dcpagent_config.go new file mode 100644 index 000000000000..e897f90659ff --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/dcpagent_config.go @@ -0,0 +1,327 @@ +package gocbcore + +import ( + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/couchbase/gocbcore/v9/connstr" +) + +// DCPAgentConfig specifies the configuration options for creation of a DCPAgent. 
+type DCPAgentConfig struct { + UserAgent string + MemdAddrs []string + HTTPAddrs []string + UseTLS bool + BucketName string + NetworkType string + Auth AuthProvider + + TLSRootCAProvider func() *x509.CertPool + + UseCompression bool + DisableDecompression bool + + UseCollections bool + + CompressionMinSize int + CompressionMinRatio float64 + + HTTPRedialPeriod time.Duration + HTTPRetryDelay time.Duration + CccpMaxWait time.Duration + CccpPollPeriod time.Duration + + ConnectTimeout time.Duration + KVConnectTimeout time.Duration + KvPoolSize int + MaxQueueSize int + + HTTPMaxIdleConns int + HTTPMaxIdleConnsPerHost int + HTTPIdleConnectionTimeout time.Duration + + AgentPriority DcpAgentPriority + UseExpiryOpcode bool + UseStreamID bool + UseOSOBackfill bool + BackfillOrder DCPBackfillOrder +} + +func (config *DCPAgentConfig) redacted() interface{} { + newConfig := DCPAgentConfig{} + newConfig = *config + if isLogRedactionLevelFull() { + // The slices here are still pointing at config's underlying arrays + // so we need to make them not do that. + newConfig.HTTPAddrs = append([]string(nil), newConfig.HTTPAddrs...) + for i, addr := range newConfig.HTTPAddrs { + newConfig.HTTPAddrs[i] = redactSystemData(addr) + } + newConfig.MemdAddrs = append([]string(nil), newConfig.MemdAddrs...) + for i, addr := range newConfig.MemdAddrs { + newConfig.MemdAddrs[i] = redactSystemData(addr) + } + + if newConfig.BucketName != "" { + newConfig.BucketName = redactMetaData(newConfig.BucketName) + } + } + + return newConfig +} + +// FromConnStr populates the AgentConfig with information from a +// Couchbase Connection String. +// Supported options are: +// ca_cert_path (string) - Specifies the path to a CA certificate. +// network (string) - The network type to use. +// kv_connect_timeout (duration) - Maximum period to attempt to connect to cluster in ms. +// config_poll_interval (duration) - Period to wait between CCCP config polling in ms. 
+// config_poll_timeout (duration) - Maximum period of time to wait for a CCCP request. +// compression (bool) - Whether to enable network-wise compression of documents. +// compression_min_size (int) - The minimal size of the document in bytes to consider compression. +// compression_min_ratio (float64) - The minimal compress ratio (compressed / original) for the document to be sent compressed. +// orphaned_response_logging (bool) - Whether to enable orphaned response logging. +// orphaned_response_logging_interval (duration) - How often to print the orphan log records. +// orphaned_response_logging_sample_size (int) - The maximum number of orphan log records to track. +// dcp_priority (int) - Specifies the priority to request from the Cluster when connecting for DCP. +// enable_dcp_expiry (bool) - Whether to enable the feature to distinguish between explicit delete and expired delete on DCP. +// kv_pool_size (int) - The number of connections to create to each kv node. +// max_queue_size (int) - The maximum number of requests that can be queued for sending per connection. +// max_idle_http_connections (int) - Maximum number of idle http connections in the pool. +// max_perhost_idle_http_connections (int) - Maximum number of idle http connections in the pool per host. +// idle_http_connection_timeout (duration) - Maximum length of time for an idle connection to stay in the pool in ms. +// http_redial_period (duration) - The maximum length of time for the HTTP poller to stay connected before reconnecting. +// http_retry_delay (duration) - The length of time to wait between HTTP poller retries if connecting fails. 
+func (config *DCPAgentConfig) FromConnStr(connStr string) error { + baseSpec, err := connstr.Parse(connStr) + if err != nil { + return err + } + + spec, err := connstr.Resolve(baseSpec) + if err != nil { + return err + } + + fetchOption := func(name string) (string, bool) { + optValue := spec.Options[name] + if len(optValue) == 0 { + return "", false + } + return optValue[len(optValue)-1], true + } + + // Grab the resolved hostnames into a set of string arrays + var httpHosts []string + for _, specHost := range spec.HttpHosts { + httpHosts = append(httpHosts, fmt.Sprintf("%s:%d", specHost.Host, specHost.Port)) + } + + var memdHosts []string + for _, specHost := range spec.MemdHosts { + memdHosts = append(memdHosts, fmt.Sprintf("%s:%d", specHost.Host, specHost.Port)) + } + // Get bootstrap_on option to determine which, if any, of the bootstrap nodes should be cleared + switch val, _ := fetchOption("bootstrap_on"); val { + case "http": + memdHosts = nil + if len(httpHosts) == 0 { + return errors.New("bootstrap_on=http but no HTTP hosts in connection string") + } + case "cccp": + httpHosts = nil + if len(memdHosts) == 0 { + return errors.New("bootstrap_on=cccp but no CCCP/Memcached hosts in connection string") + } + case "both": + case "": + // Do nothing + break + default: + return errors.New("bootstrap_on={http,cccp,both}") + } + + config.MemdAddrs = memdHosts + config.HTTPAddrs = httpHosts + + if spec.UseSsl { + cacertpaths := spec.Options["ca_cert_path"] + + if len(cacertpaths) > 0 { + roots := x509.NewCertPool() + + for _, path := range cacertpaths { + cacert, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + ok := roots.AppendCertsFromPEM(cacert) + if !ok { + return errInvalidCertificate + } + } + + config.TLSRootCAProvider = func() *x509.CertPool { + return roots + } + } + + config.UseTLS = true + } + + if spec.Bucket != "" { + config.BucketName = spec.Bucket + } + + if valStr, ok := fetchOption("network"); ok { + if valStr == "default" { + 
valStr = "" + } + + config.NetworkType = valStr + } + + if valStr, ok := fetchOption("kv_connect_timeout"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("kv_connect_timeout option must be a duration or a number") + } + config.KVConnectTimeout = val + } + + if valStr, ok := fetchOption("config_poll_timeout"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("config poll timeout option must be a duration or a number") + } + config.CccpMaxWait = val + } + + if valStr, ok := fetchOption("config_poll_interval"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("config pool interval option must be duration or a number") + } + config.CccpPollPeriod = val + } + + if valStr, ok := fetchOption("compression"); ok { + val, err := strconv.ParseBool(valStr) + if err != nil { + return fmt.Errorf("compression option must be a boolean") + } + config.UseCompression = val + } + + if valStr, ok := fetchOption("compression_min_size"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("compression_min_size option must be an int") + } + config.CompressionMinSize = int(val) + } + + if valStr, ok := fetchOption("compression_min_ratio"); ok { + val, err := strconv.ParseFloat(valStr, 64) + if err != nil { + return fmt.Errorf("compression_min_size option must be an int") + } + config.CompressionMinRatio = val + } + + if valStr, ok := fetchOption("max_idle_http_connections"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("http max idle connections option must be a number") + } + config.HTTPMaxIdleConns = int(val) + } + + if valStr, ok := fetchOption("max_perhost_idle_http_connections"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("max_perhost_idle_http_connections option must be a number") + } + config.HTTPMaxIdleConnsPerHost = int(val) + } + + if valStr, ok 
:= fetchOption("idle_http_connection_timeout"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("idle_http_connection_timeout option must be a duration or a number") + } + config.HTTPIdleConnectionTimeout = val + } + + // This option is experimental + if valStr, ok := fetchOption("http_redial_period"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("http redial period option must be a duration or a number") + } + config.HTTPRedialPeriod = val + } + + // This option is experimental + if valStr, ok := fetchOption("http_retry_delay"); ok { + val, err := parseDurationOrInt(valStr) + if err != nil { + return fmt.Errorf("http retry delay option must be a duration or a number") + } + config.HTTPRetryDelay = val + } + + // This option is experimental + if valStr, ok := fetchOption("dcp_priority"); ok { + var priority DcpAgentPriority + switch valStr { + case "": + priority = DcpAgentPriorityLow + case "low": + priority = DcpAgentPriorityLow + case "medium": + priority = DcpAgentPriorityMed + case "high": + priority = DcpAgentPriorityHigh + default: + return fmt.Errorf("dcp_priority must be one of low, medium or high") + } + config.AgentPriority = priority + } + + // This option is experimental + if valStr, ok := fetchOption("enable_dcp_expiry"); ok { + val, err := strconv.ParseBool(valStr) + if err != nil { + return fmt.Errorf("enable_dcp_expiry option must be a boolean") + } + config.UseExpiryOpcode = val + } + + // This option is experimental + if valStr, ok := fetchOption("kv_pool_size"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("kv pool size option must be a number") + } + config.KvPoolSize = int(val) + } + + // This option is experimental + if valStr, ok := fetchOption("max_queue_size"); ok { + val, err := strconv.ParseInt(valStr, 10, 64) + if err != nil { + return fmt.Errorf("max queue size option must be a number") + } + config.MaxQueueSize = 
int(val) + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/dcpcomponent.go b/vendor/github.com/couchbase/gocbcore/v9/dcpcomponent.go new file mode 100644 index 000000000000..7722440fdf51 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/dcpcomponent.go @@ -0,0 +1,384 @@ +package gocbcore + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "sync/atomic" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type dcpComponent struct { + kvMux *kvMux + streamIDEnabled bool +} + +func newDcpComponent(kvMux *kvMux, streamIDEnabled bool) *dcpComponent { + return &dcpComponent{ + kvMux: kvMux, + streamIDEnabled: streamIDEnabled, + } +} + +func (dcp *dcpComponent) OpenStream(vbID uint16, flags memd.DcpStreamAddFlag, vbUUID VbUUID, startSeqNo, + endSeqNo, snapStartSeqNo, snapEndSeqNo SeqNo, evtHandler StreamObserver, opts OpenStreamOptions, + cb OpenStreamCallback) (PendingOp, error) { + var req *memdQRequest + var openHandled uint32 + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if resp == nil && err == nil { + logWarnf("DCP event occurred with no error and no response") + return + } + + if err != nil { + if resp == nil { + if atomic.CompareAndSwapUint32(&openHandled, 0, 1) { + // If open hasn't been handled and there's no response then it's reasonably safe to assume that + // this occurring for the open stream request. + cb(nil, err) + return + } + } + + if resp != nil && resp.Magic == memd.CmdMagicRes { + atomic.StoreUint32(&openHandled, 1) + // CmdMagicRes means that this must be the open stream request response. + cb(nil, err) + return + } + + var streamID uint16 + if opts.StreamOptions != nil { + streamID = opts.StreamOptions.StreamID + } + evtHandler.End(vbID, streamID, err) + return + } + + if resp.Magic == memd.CmdMagicRes { + atomic.StoreUint32(&openHandled, 1) + // This is the response to the open stream request. 
+ numEntries := len(resp.Value) / 16 + entries := make([]FailoverEntry, numEntries) + for i := 0; i < numEntries; i++ { + entries[i] = FailoverEntry{ + VbUUID: VbUUID(binary.BigEndian.Uint64(resp.Value[i*16+0:])), + SeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*16+8:])), + } + } + + cb(entries, nil) + return + } + + // This is one of the stream events + switch resp.Command { + case memd.CmdDcpSnapshotMarker: + vbID := resp.Vbucket + newStartSeqNo := binary.BigEndian.Uint64(resp.Extras[0:]) + newEndSeqNo := binary.BigEndian.Uint64(resp.Extras[8:]) + snapshotType := binary.BigEndian.Uint32(resp.Extras[16:]) + var streamID uint16 + if resp.StreamIDFrame != nil { + streamID = resp.StreamIDFrame.StreamID + } + evtHandler.SnapshotMarker(newStartSeqNo, newEndSeqNo, vbID, streamID, SnapshotState(snapshotType)) + case memd.CmdDcpMutation: + vbID := resp.Vbucket + seqNo := binary.BigEndian.Uint64(resp.Extras[0:]) + revNo := binary.BigEndian.Uint64(resp.Extras[8:]) + flags := binary.BigEndian.Uint32(resp.Extras[16:]) + expiry := binary.BigEndian.Uint32(resp.Extras[20:]) + lockTime := binary.BigEndian.Uint32(resp.Extras[24:]) + var streamID uint16 + if resp.StreamIDFrame != nil { + streamID = resp.StreamIDFrame.StreamID + } + evtHandler.Mutation(seqNo, revNo, flags, expiry, lockTime, resp.Cas, resp.Datatype, vbID, resp.CollectionID, streamID, resp.Key, resp.Value) + case memd.CmdDcpDeletion: + vbID := resp.Vbucket + seqNo := binary.BigEndian.Uint64(resp.Extras[0:]) + revNo := binary.BigEndian.Uint64(resp.Extras[8:]) + var deleteTime uint32 + if len(resp.Extras) == 21 { + // Length of 21 indicates a v2 packet + deleteTime = binary.BigEndian.Uint32(resp.Extras[16:]) + } + + var streamID uint16 + if resp.StreamIDFrame != nil { + streamID = resp.StreamIDFrame.StreamID + } + evtHandler.Deletion(seqNo, revNo, deleteTime, resp.Cas, resp.Datatype, vbID, resp.CollectionID, streamID, resp.Key, resp.Value) + case memd.CmdDcpExpiration: + vbID := resp.Vbucket + seqNo := 
binary.BigEndian.Uint64(resp.Extras[0:]) + revNo := binary.BigEndian.Uint64(resp.Extras[8:]) + var deleteTime uint32 + if len(resp.Extras) > 16 { + deleteTime = binary.BigEndian.Uint32(resp.Extras[16:]) + } + + var streamID uint16 + if resp.StreamIDFrame != nil { + streamID = resp.StreamIDFrame.StreamID + } + evtHandler.Expiration(seqNo, revNo, deleteTime, resp.Cas, vbID, resp.CollectionID, streamID, resp.Key) + case memd.CmdDcpEvent: + vbID := resp.Vbucket + seqNo := binary.BigEndian.Uint64(resp.Extras[0:]) + eventCode := memd.StreamEventCode(binary.BigEndian.Uint32(resp.Extras[8:])) + version := resp.Extras[12] + var streamID uint16 + if resp.StreamIDFrame != nil { + streamID = resp.StreamIDFrame.StreamID + } + + switch eventCode { + case memd.StreamEventCollectionCreate: + manifestUID := binary.BigEndian.Uint64(resp.Value[0:]) + scopeID := binary.BigEndian.Uint32(resp.Value[8:]) + collectionID := binary.BigEndian.Uint32(resp.Value[12:]) + var ttl uint32 + if version == 1 { + ttl = binary.BigEndian.Uint32(resp.Value[16:]) + } + evtHandler.CreateCollection(seqNo, version, vbID, manifestUID, scopeID, collectionID, ttl, streamID, resp.Key) + case memd.StreamEventCollectionDelete: + manifestUID := binary.BigEndian.Uint64(resp.Value[0:]) + scopeID := binary.BigEndian.Uint32(resp.Value[8:]) + collectionID := binary.BigEndian.Uint32(resp.Value[12:]) + evtHandler.DeleteCollection(seqNo, version, vbID, manifestUID, scopeID, collectionID, streamID) + case memd.StreamEventCollectionFlush: + manifestUID := binary.BigEndian.Uint64(resp.Value[0:]) + collectionID := binary.BigEndian.Uint32(resp.Value[8:]) + evtHandler.FlushCollection(seqNo, version, vbID, manifestUID, collectionID) + case memd.StreamEventScopeCreate: + manifestUID := binary.BigEndian.Uint64(resp.Value[0:]) + scopeID := binary.BigEndian.Uint32(resp.Value[8:]) + evtHandler.CreateScope(seqNo, version, vbID, manifestUID, scopeID, streamID, resp.Key) + case memd.StreamEventScopeDelete: + manifestUID := 
binary.BigEndian.Uint64(resp.Value[0:]) + scopeID := binary.BigEndian.Uint32(resp.Value[8:]) + evtHandler.DeleteScope(seqNo, version, vbID, manifestUID, scopeID, streamID) + case memd.StreamEventCollectionChanged: + manifestUID := binary.BigEndian.Uint64(resp.Value[0:]) + collectionID := binary.BigEndian.Uint32(resp.Value[8:]) + ttl := binary.BigEndian.Uint32(resp.Value[12:]) + evtHandler.ModifyCollection(seqNo, version, vbID, manifestUID, collectionID, ttl, streamID) + } + case memd.CmdDcpStreamEnd: + vbID := resp.Vbucket + code := memd.StreamEndStatus(binary.BigEndian.Uint32(resp.Extras[0:])) + var streamID uint16 + if resp.StreamIDFrame != nil { + streamID = resp.StreamIDFrame.StreamID + } + evtHandler.End(vbID, streamID, getStreamEndStatusError(code)) + req.internalCancel(err) + case memd.CmdDcpOsoSnapshot: + vbID := resp.Vbucket + snapshotType := binary.BigEndian.Uint32(resp.Extras[0:]) + var streamID uint16 + if resp.StreamIDFrame != nil { + streamID = resp.StreamIDFrame.StreamID + } + evtHandler.OSOSnapshot(vbID, snapshotType, streamID) + case memd.CmdDcpSeqNoAdvanced: + vbID := resp.Vbucket + seqno := binary.BigEndian.Uint64(resp.Extras[0:]) + var streamID uint16 + if resp.StreamIDFrame != nil { + streamID = resp.StreamIDFrame.StreamID + } + evtHandler.SeqNoAdvanced(vbID, seqno, streamID) + } + } + + extraBuf := make([]byte, 48) + binary.BigEndian.PutUint32(extraBuf[0:], uint32(flags)) + binary.BigEndian.PutUint32(extraBuf[4:], 0) + binary.BigEndian.PutUint64(extraBuf[8:], uint64(startSeqNo)) + binary.BigEndian.PutUint64(extraBuf[16:], uint64(endSeqNo)) + binary.BigEndian.PutUint64(extraBuf[24:], uint64(vbUUID)) + binary.BigEndian.PutUint64(extraBuf[32:], uint64(snapStartSeqNo)) + binary.BigEndian.PutUint64(extraBuf[40:], uint64(snapEndSeqNo)) + + var val []byte + val = nil + if opts.StreamOptions != nil || opts.FilterOptions != nil || opts.ManifestOptions != nil { + convertedFilter := streamFilter{} + + if opts.FilterOptions != nil { + // If there are 
collection IDs then we can assume that scope ID of 0 actually means no scope ID + if len(opts.FilterOptions.CollectionIDs) > 0 { + for _, cid := range opts.FilterOptions.CollectionIDs { + convertedFilter.Collections = append(convertedFilter.Collections, fmt.Sprintf("%x", cid)) + } + } else { + // No collection IDs but the filter was set so even if scope ID is 0 then we use it + convertedFilter.Scope = fmt.Sprintf("%x", opts.FilterOptions.ScopeID) + } + + } + if opts.ManifestOptions != nil { + convertedFilter.ManifestUID = fmt.Sprintf("%x", opts.ManifestOptions.ManifestUID) + } + if opts.StreamOptions != nil { + convertedFilter.StreamID = opts.StreamOptions.StreamID + } + + var err error + val, err = json.Marshal(convertedFilter) + if err != nil { + return nil, err + } + } + + req = &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdDcpStreamReq, + Datatype: 0, + Cas: 0, + Extras: extraBuf, + Key: nil, + Value: val, + Vbucket: vbID, + }, + Callback: handler, + ReplicaIdx: 0, + Persistent: true, + } + return dcp.kvMux.DispatchDirect(req) +} + +func (dcp *dcpComponent) CloseStream(vbID uint16, opts CloseStreamOptions, cb CloseStreamCallback) (PendingOp, error) { + handler := func(_ *memdQResponse, _ *memdQRequest, err error) { + cb(err) + } + + var streamFrame *memd.StreamIDFrame + if opts.StreamOptions != nil { + if !dcp.streamIDEnabled { + return nil, errStreamIDNotEnabled + } + + streamFrame = &memd.StreamIDFrame{ + StreamID: opts.StreamOptions.StreamID, + } + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdDcpCloseStream, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: nil, + Value: nil, + Vbucket: vbID, + StreamIDFrame: streamFrame, + }, + Callback: handler, + ReplicaIdx: 0, + Persistent: false, + RetryStrategy: newFailFastRetryStrategy(), + } + + return dcp.kvMux.DispatchDirect(req) +} + +func (dcp *dcpComponent) GetFailoverLog(vbID uint16, cb GetFailoverLogCallback) (PendingOp, 
error) { + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + cb(nil, err) + return + } + + numEntries := len(resp.Value) / 16 + entries := make([]FailoverEntry, numEntries) + for i := 0; i < numEntries; i++ { + entries[i] = FailoverEntry{ + VbUUID: VbUUID(binary.BigEndian.Uint64(resp.Value[i*16+0:])), + SeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*16+8:])), + } + } + cb(entries, nil) + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdDcpGetFailoverLog, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: nil, + Value: nil, + Vbucket: vbID, + }, + Callback: handler, + ReplicaIdx: 0, + Persistent: false, + RetryStrategy: newFailFastRetryStrategy(), + } + return dcp.kvMux.DispatchDirect(req) +} + +func (dcp *dcpComponent) GetVbucketSeqnos(serverIdx int, state memd.VbucketState, opts GetVbucketSeqnoOptions, cb GetVBucketSeqnosCallback) (PendingOp, error) { + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + cb(nil, err) + return + } + + var vbs []VbSeqNoEntry + + numVbs := len(resp.Value) / 10 + for i := 0; i < numVbs; i++ { + vbs = append(vbs, VbSeqNoEntry{ + VbID: binary.BigEndian.Uint16(resp.Value[i*10:]), + SeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*10+2:])), + }) + } + + cb(vbs, nil) + } + + var extraBuf []byte + + if opts.FilterOptions == nil { + extraBuf = make([]byte, 4) + binary.BigEndian.PutUint32(extraBuf[0:], uint32(state)) + } else { + if !dcp.kvMux.SupportsCollections() { + return nil, errCollectionsUnsupported + } + + extraBuf = make([]byte, 8) + binary.BigEndian.PutUint32(extraBuf[0:], uint32(state)) + binary.BigEndian.PutUint32(extraBuf[4:], opts.FilterOptions.CollectionID) + } + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdGetAllVBSeqnos, + Datatype: 0, + Cas: 0, + Extras: extraBuf, + Key: nil, + Value: nil, + Vbucket: 0, + }, + Callback: handler, + ReplicaIdx: -serverIdx, + 
Persistent: false, + RetryStrategy: newFailFastRetryStrategy(), + } + + return dcp.kvMux.DispatchDirect(req) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/diagnostics.go b/vendor/github.com/couchbase/gocbcore/v9/diagnostics.go new file mode 100644 index 000000000000..f1804c77ef9b --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/diagnostics.go @@ -0,0 +1,225 @@ +package gocbcore + +import ( + "context" + "sort" + "sync" + "sync/atomic" + "time" +) + +// PingState is the current state of a endpoint used in a PingResult. +type PingState uint32 + +const ( + // PingStateOK indicates that an endpoint is OK. + PingStateOK PingState = 1 + + // PingStateTimeout indicates that the ping request to an endpoint timed out. + PingStateTimeout PingState = 2 + + // PingStateError indicates that the ping request to an endpoint encountered an error. + PingStateError PingState = 3 +) + +// EndpointState is the current connection state of an endpoint. +type EndpointState uint32 + +const ( + // EndpointStateDisconnected indicates that the endpoint is disconnected. + EndpointStateDisconnected EndpointState = 1 + + // EndpointStateConnecting indicates that the endpoint is connecting. + EndpointStateConnecting EndpointState = 2 + + // EndpointStateConnected indicates that the endpoint is connected. + EndpointStateConnected EndpointState = 3 + + // EndpointStateDisconnecting indicates that the endpoint is disconnecting. + EndpointStateDisconnecting EndpointState = 4 +) + +// EndpointPingResult contains the results of a ping to a single server. 
+type EndpointPingResult struct { + Endpoint string + Error error + Latency time.Duration + ID string + Scope string + State PingState +} + +type pingSubOp struct { + op PendingOp + endpoint string +} + +type pingOp struct { + lock sync.Mutex + subops []pingSubOp + remaining int32 + results map[ServiceType][]EndpointPingResult + callback PingCallback + bucketName string + httpCancel context.CancelFunc +} + +func (pop *pingOp) Cancel() { + for _, subop := range pop.subops { + subop.op.Cancel() + } + pop.httpCancel() +} + +func (pop *pingOp) handledOneLocked(configRev int64) { + remaining := atomic.AddInt32(&pop.remaining, -1) + if remaining == 0 { + pop.httpCancel() + pop.callback(&PingResult{ + ConfigRev: configRev, + Services: pop.results, + }, nil) + } +} + +// PingOptions encapsulates the parameters for a PingKv operation. +type PingOptions struct { + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext + KVDeadline time.Time + CbasDeadline time.Time + N1QLDeadline time.Time + FtsDeadline time.Time + CapiDeadline time.Time + MgmtDeadline time.Time + ServiceTypes []ServiceType + + ignoreMissingServices bool +} + +// PingResult encapsulates the result of a PingKv operation. +type PingResult struct { + ConfigRev int64 + Services map[ServiceType][]EndpointPingResult +} + +// DiagnosticsOptions encapsulates the parameters for a Diagnostics operation. +type DiagnosticsOptions struct { +} + +// MemdConnInfo represents information we know about a particular +// memcached connection reported in a diagnostics report. +type MemdConnInfo struct { + LocalAddr string + RemoteAddr string + LastActivity time.Time + Scope string + ID string + State EndpointState +} + +// DiagnosticInfo is returned by the Diagnostics method and includes +// information about the overall health of the clients connections. 
+type DiagnosticInfo struct { + ConfigRev int64 + MemdConns []MemdConnInfo + State ClusterState +} + +// ClusterState is used to describe the state of a cluster. +type ClusterState uint32 + +const ( + // ClusterStateOnline specifies that all nodes and their sockets are reachable. + ClusterStateOnline = ClusterState(1) + + // ClusterStateDegraded specifies that at least one socket per service is reachable. + ClusterStateDegraded = ClusterState(2) + + // ClusterStateOffline is used to specify that not even one socker per service is reachable. + ClusterStateOffline = ClusterState(3) +) + +type waitUntilOp struct { + lock sync.Mutex + remaining int32 + callback WaitUntilReadyCallback + stopCh chan struct{} + timer *time.Timer + httpCancel context.CancelFunc + + retryLock sync.Mutex + retries uint32 + retryReasons []RetryReason + retryStrat RetryStrategy +} + +func (wuo *waitUntilOp) RetryAttempts() uint32 { + return atomic.LoadUint32(&wuo.retries) +} + +func (wuo *waitUntilOp) RetryReasons() []RetryReason { + wuo.retryLock.Lock() + defer wuo.retryLock.Unlock() + return wuo.retryReasons +} + +func (wuo *waitUntilOp) Identifier() string { + return "waituntilready" +} + +func (wuo *waitUntilOp) Idempotent() bool { + return true +} + +func (wuo *waitUntilOp) retryStrategy() RetryStrategy { + return wuo.retryStrat +} + +func (wuo *waitUntilOp) recordRetryAttempt(reason RetryReason) { + atomic.AddUint32(&wuo.retries, 1) + wuo.retryLock.Lock() + defer wuo.retryLock.Unlock() + idx := sort.Search(len(wuo.retryReasons), func(i int) bool { + return wuo.retryReasons[i] == reason + }) + + // if idx is out of the range of retryReasons then it wasn't found. 
+ if idx > len(wuo.retryReasons)-1 { + wuo.retryReasons = append(wuo.retryReasons, reason) + } +} + +func (wuo *waitUntilOp) cancel(err error) { + wuo.lock.Lock() + wuo.timer.Stop() + wuo.lock.Unlock() + close(wuo.stopCh) + wuo.httpCancel() + wuo.callback(nil, err) +} + +func (wuo *waitUntilOp) Cancel() { + wuo.cancel(errRequestCanceled) +} + +func (wuo *waitUntilOp) handledOneLocked() { + remaining := atomic.AddInt32(&wuo.remaining, -1) + if remaining == 0 { + wuo.timer.Stop() + wuo.httpCancel() + wuo.callback(&WaitUntilReadyResult{}, nil) + } +} + +// WaitUntilReadyResult encapsulates the result of a WaitUntilReady operation. +type WaitUntilReadyResult struct { +} + +// WaitUntilReadyOptions encapsulates the parameters for a WaitUntilReady operation. +type WaitUntilReadyOptions struct { + DesiredState ClusterState // Defaults to ClusterStateOnline + ServiceTypes []ServiceType // Defaults to all services + // If the cluster state is offline and a connect error has been observed then fast fail and return it. 
+ RetryStrategy RetryStrategy +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/diagnosticscomponent.go b/vendor/github.com/couchbase/gocbcore/v9/diagnosticscomponent.go new file mode 100644 index 000000000000..038cb4795b7e --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/diagnosticscomponent.go @@ -0,0 +1,795 @@ +package gocbcore + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/google/uuid" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type diagnosticsComponent struct { + kvMux *kvMux + httpMux *httpMux + httpComponent *httpComponent + bucket string + defaultRetry RetryStrategy + pollerErrorProvider pollerErrorProvider +} + +func newDiagnosticsComponent(kvMux *kvMux, httpMux *httpMux, httpComponent *httpComponent, bucket string, + defaultRetry RetryStrategy, pollerErrorProvider pollerErrorProvider) *diagnosticsComponent { + return &diagnosticsComponent{ + kvMux: kvMux, + httpMux: httpMux, + bucket: bucket, + httpComponent: httpComponent, + defaultRetry: defaultRetry, + pollerErrorProvider: pollerErrorProvider, + } +} + +func (dc *diagnosticsComponent) pingKV(ctx context.Context, interval time.Duration, deadline time.Time, + retryStrat RetryStrategy, op *pingOp) { + + if !deadline.IsZero() { + // We have to setup a new child context with its own deadline because services have their own timeout values. 
+ var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, deadline) + defer cancel() + } + + for { + iter, err := dc.kvMux.PipelineSnapshot() + if err != nil { + logErrorf("failed to get pipeline snapshot") + + select { + case <-ctx.Done(): + ctxErr := ctx.Err() + var cancelReason error + if errors.Is(ctxErr, context.Canceled) { + cancelReason = ctxErr + } else { + cancelReason = errUnambiguousTimeout + } + + op.results[MemdService] = append(op.results[MemdService], EndpointPingResult{ + Error: cancelReason, + Scope: op.bucketName, + ID: uuid.New().String(), + State: PingStateTimeout, + }) + op.handledOneLocked(iter.RevID()) + return + case <-time.After(interval): + continue + } + } + + if iter.RevID() > -1 { + var wg sync.WaitGroup + iter.Iterate(0, func(p *memdPipeline) bool { + wg.Add(1) + go func(pipeline *memdPipeline) { + serverAddress := pipeline.Address() + + startTime := time.Now() + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + pingLatency := time.Since(startTime) + + state := PingStateOK + if err != nil { + if errors.Is(err, ErrTimeout) { + state = PingStateTimeout + } else { + state = PingStateError + } + } + + op.lock.Lock() + op.results[MemdService] = append(op.results[MemdService], EndpointPingResult{ + Endpoint: serverAddress, + Error: err, + Latency: pingLatency, + Scope: op.bucketName, + ID: fmt.Sprintf("%p", pipeline), + State: state, + }) + op.lock.Unlock() + wg.Done() + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdNoop, + Datatype: 0, + Cas: 0, + Key: nil, + Value: nil, + }, + Callback: handler, + RetryStrategy: retryStrat, + } + + curOp, err := dc.kvMux.DispatchDirectToAddress(req, pipeline) + if err != nil { + op.lock.Lock() + op.results[MemdService] = append(op.results[MemdService], EndpointPingResult{ + Endpoint: redactSystemData(serverAddress), + Error: err, + Latency: 0, + Scope: op.bucketName, + }) + op.lock.Unlock() + wg.Done() + return + } + + 
if !deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "PingKV", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + op.lock.Lock() + op.subops = append(op.subops, pingSubOp{ + endpoint: serverAddress, + op: curOp, + }) + op.lock.Unlock() + }(p) + + // We iterate through all pipelines + return false + }) + + wg.Wait() + op.lock.Lock() + op.handledOneLocked(iter.RevID()) + op.lock.Unlock() + return + } + + select { + case <-ctx.Done(): + ctxErr := ctx.Err() + var cancelReason error + if errors.Is(ctxErr, context.Canceled) { + cancelReason = ctxErr + } else { + cancelReason = errUnambiguousTimeout + } + + op.lock.Lock() + op.results[MemdService] = append(op.results[MemdService], EndpointPingResult{ + Error: cancelReason, + Scope: op.bucketName, + ID: uuid.New().String(), + State: PingStateTimeout, + }) + op.handledOneLocked(iter.RevID()) + op.lock.Unlock() + return + case <-time.After(interval): + } + } +} + +func (dc *diagnosticsComponent) pingHTTP(ctx context.Context, service ServiceType, + interval time.Duration, deadline time.Time, retryStrat RetryStrategy, op *pingOp, ignoreMissingServices bool) { + + if !deadline.IsZero() { + // We have to setup a new child context with its own deadline because services have their own timeout values. 
+ var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, deadline) + defer cancel() + } + + muxer := dc.httpMux + + var path string + switch service { + case N1qlService: + path = "/admin/ping" + case CbasService: + path = "/admin/ping" + case FtsService: + path = "/api/ping" + case CapiService: + path = "/" + } + + for { + clientMux := muxer.Get() + if clientMux.revID > -1 { + var epList []string + switch service { + case N1qlService: + epList = clientMux.n1qlEpList + case CbasService: + epList = clientMux.cbasEpList + case FtsService: + epList = clientMux.ftsEpList + case MgmtService: + epList = clientMux.mgmtEpList + case CapiService: + epList = dc.endpointsFromCapiList(clientMux.capiEpList) + } + + if len(epList) == 0 { + op.lock.Lock() + if !ignoreMissingServices { + op.results[service] = append(op.results[service], EndpointPingResult{ + Error: errServiceNotAvailable, + Scope: op.bucketName, + ID: uuid.New().String(), + }) + } + op.handledOneLocked(clientMux.revID) + op.lock.Unlock() + return + } + + var wg sync.WaitGroup + for _, ep := range epList { + wg.Add(1) + go func(ep string) { + defer wg.Done() + req := &httpRequest{ + Service: service, + Method: "GET", + Path: path, + Endpoint: ep, + IsIdempotent: true, + RetryStrategy: retryStrat, + Context: ctx, + UniqueID: uuid.New().String(), + } + start := time.Now() + resp, err := dc.httpComponent.DoInternalHTTPRequest(req, false) + pingLatency := time.Since(start) + state := PingStateOK + if err != nil { + if errors.Is(err, ErrTimeout) { + state = PingStateTimeout + } else { + state = PingStateError + } + } else { + if resp.StatusCode > 200 { + state = PingStateError + b, pErr := ioutil.ReadAll(resp.Body) + if pErr != nil { + logDebugf("Failed to read response body for ping: %v", pErr) + } + + err = errors.New(string(b)) + } + } + op.lock.Lock() + op.results[service] = append(op.results[service], EndpointPingResult{ + Endpoint: ep, + Error: err, + Latency: pingLatency, + Scope: op.bucketName, 
+ ID: uuid.New().String(), + State: state, + }) + op.lock.Unlock() + }(ep) + } + + wg.Wait() + op.lock.Lock() + op.handledOneLocked(clientMux.revID) + op.lock.Unlock() + return + } + + select { + case <-ctx.Done(): + ctxErr := ctx.Err() + var cancelReason error + if errors.Is(ctxErr, context.Canceled) { + cancelReason = ctxErr + } else { + cancelReason = errUnambiguousTimeout + } + + op.lock.Lock() + op.results[service] = append(op.results[service], EndpointPingResult{ + Error: cancelReason, + Scope: op.bucketName, + ID: uuid.New().String(), + State: PingStateTimeout, + }) + op.handledOneLocked(clientMux.revID) + op.lock.Unlock() + return + case <-time.After(interval): + } + } +} + +func (dc *diagnosticsComponent) Ping(opts PingOptions, cb PingCallback) (PendingOp, error) { + bucketName := "" + if dc.bucket != "" { + bucketName = redactMetaData(dc.bucket) + } + + ignoreMissingServices := false + serviceTypes := opts.ServiceTypes + if len(serviceTypes) == 0 { + // We're defaulting to pinging what we can so don't ping anything that isn't in the cluster config + ignoreMissingServices = true + serviceTypes = []ServiceType{MemdService, CapiService, N1qlService, FtsService, CbasService, MgmtService} + } + + ignoreMissingServices = ignoreMissingServices || opts.ignoreMissingServices + + ctx, cancelFunc := context.WithCancel(context.Background()) + + op := &pingOp{ + callback: cb, + remaining: int32(len(serviceTypes)), + results: make(map[ServiceType][]EndpointPingResult), + bucketName: bucketName, + httpCancel: cancelFunc, + } + + retryStrat := newFailFastRetryStrategy() + + // interval is how long to wait between checking if we've seen a cluster config + interval := 10 * time.Millisecond + + for _, serviceType := range serviceTypes { + switch serviceType { + case MemdService: + go dc.pingKV(ctx, interval, opts.KVDeadline, retryStrat, op) + case CapiService: + go dc.pingHTTP(ctx, CapiService, interval, opts.CapiDeadline, retryStrat, op, ignoreMissingServices) + case 
N1qlService: + go dc.pingHTTP(ctx, N1qlService, interval, opts.N1QLDeadline, retryStrat, op, ignoreMissingServices) + case FtsService: + go dc.pingHTTP(ctx, FtsService, interval, opts.FtsDeadline, retryStrat, op, ignoreMissingServices) + case CbasService: + go dc.pingHTTP(ctx, CbasService, interval, opts.CbasDeadline, retryStrat, op, ignoreMissingServices) + case MgmtService: + go dc.pingHTTP(ctx, MgmtService, interval, opts.MgmtDeadline, retryStrat, op, ignoreMissingServices) + } + } + + return op, nil +} + +func (dc *diagnosticsComponent) endpointsFromCapiList(capiEpList []string) []string { + var epList []string + for _, ep := range capiEpList { + epList = append(epList, strings.TrimRight(ep, "/"+dc.bucket)) + } + + return epList +} + +// Diagnostics returns diagnostics information about the client. +// Mainly containing a list of open connections and their current +// states. +func (dc *diagnosticsComponent) Diagnostics(opts DiagnosticsOptions) (*DiagnosticInfo, error) { + for { + iter, err := dc.kvMux.PipelineSnapshot() + if err != nil { + return nil, err + } + + var conns []MemdConnInfo + + iter.Iterate(0, func(pipeline *memdPipeline) bool { + pipeline.clientsLock.Lock() + for _, pipecli := range pipeline.clients { + localAddr := "" + remoteAddr := "" + var lastActivity time.Time + + pipecli.lock.Lock() + if pipecli.client != nil { + localAddr = pipecli.client.LocalAddress() + remoteAddr = pipecli.client.Address() + lastActivityUs := atomic.LoadInt64(&pipecli.client.lastActivity) + if lastActivityUs != 0 { + lastActivity = time.Unix(0, lastActivityUs) + } + } + pipecli.lock.Unlock() + + conn := MemdConnInfo{ + LocalAddr: localAddr, + RemoteAddr: remoteAddr, + LastActivity: lastActivity, + ID: fmt.Sprintf("%p", pipecli), + State: pipecli.State(), + } + if dc.bucket != "" { + conn.Scope = redactMetaData(dc.bucket) + } + conns = append(conns, conn) + } + pipeline.clientsLock.Unlock() + return false + }) + + expected := len(conns) + connected := 0 + for _, conn 
:= range conns { + if conn.State == EndpointStateConnected { + connected++ + } + } + + state := ClusterStateOffline + if connected == expected { + state = ClusterStateOnline + } else if connected > 1 { + state = ClusterStateDegraded + } + + endIter, err := dc.kvMux.PipelineSnapshot() + if err != nil { + return nil, err + } + if iter.RevID() == endIter.RevID() { + return &DiagnosticInfo{ + ConfigRev: iter.RevID(), + MemdConns: conns, + State: state, + }, nil + } + } +} + +func (dc *diagnosticsComponent) checkKVReady(desiredState ClusterState, op *waitUntilOp) { + for { + iter, err := dc.kvMux.PipelineSnapshot() + if err != nil { + logErrorf("failed to get pipeline snapshot: %v", err) + + shouldRetry, until := retryOrchMaybeRetry(op, NoPipelineSnapshotRetryReason) + if !shouldRetry { + op.cancel(err) + return + } + + select { + case <-op.stopCh: + return + case <-time.After(time.Until(until)): + continue + } + } + + var connectErr error + revID := iter.RevID() + if revID == -1 { + // We've not seen a config so let's see if there are any errors lurking on the connections. + iter.Iterate(0, func(pipeline *memdPipeline) bool { + pipeline.clientsLock.Lock() + defer pipeline.clientsLock.Unlock() + for _, cli := range pipeline.clients { + err := cli.Error() + if err != nil { + connectErr = err + + return true + } + } + + return false + }) + + // If there's no error appearing from the pipeline client then let's check the poller + if connectErr == nil && dc.pollerErrorProvider != nil { + pollerErr := dc.pollerErrorProvider.PollerError() + + // We don't care about timeouts, they don't tell us anything we want to know. 
+ if pollerErr != nil && !errors.Is(pollerErr, ErrTimeout) { + connectErr = pollerErr + } + } + } else if revID > -1 { + expected := iter.NumPipelines() + connected := 0 + iter.Iterate(0, func(pipeline *memdPipeline) bool { + pipeline.clientsLock.Lock() + defer pipeline.clientsLock.Unlock() + for _, cli := range pipeline.clients { + state := cli.State() + if state == EndpointStateConnected { + connected++ + if desiredState == ClusterStateDegraded { + // If we're after degraded state then we can just bail early as we've already fulfilled that. + return true + } + + // We only need one of the pipeline clients to be connected for this pipeline to be considered + // online. + break + } + + err := cli.Error() + if err != nil { + connectErr = err + + // If the desired state is degraded then we need to keep trying as a different client or pipeline + // might be connected. If it's online then we can bail now as we'll never achieve that. + if desiredState == ClusterStateOnline { + return true + } + } + + // If there's no error appearing from the pipeline client then let's check the poller + if connectErr == nil && dc.pollerErrorProvider != nil { + pollerErr := dc.pollerErrorProvider.PollerError() + + // We don't care about timeouts, they don't tell us anything we want to know. 
+ if pollerErr != nil && !errors.Is(pollerErr, ErrTimeout) { + connectErr = pollerErr + } + } + } + + return false + }) + + switch desiredState { + case ClusterStateDegraded: + if connected > 0 { + op.lock.Lock() + op.handledOneLocked() + op.lock.Unlock() + + return + } + case ClusterStateOnline: + if connected == expected { + op.lock.Lock() + op.handledOneLocked() + op.lock.Unlock() + + return + } + default: + // How we got here no-one does know + // But round and round we must go + } + } + + var until time.Time + if connectErr == nil { + var shouldRetry bool + shouldRetry, until = retryOrchMaybeRetry(op, NotReadyRetryReason) + if !shouldRetry { + op.cancel(errCliInternalError) + return + } + } else { + var shouldRetry bool + shouldRetry, until = retryOrchMaybeRetry(op, ConnectionErrorRetryReason) + if !shouldRetry { + op.cancel(connectErr) + return + } + } + + select { + case <-op.stopCh: + return + case <-time.After(time.Until(until)): + } + } +} + +func (dc *diagnosticsComponent) checkHTTPReady(ctx context.Context, service ServiceType, + desiredState ClusterState, op *waitUntilOp) { + retryStrat := &failFastRetryStrategy{} + ctx, cancel := context.WithCancel(ctx) + defer cancel() + muxer := dc.httpMux + + var path string + switch service { + case N1qlService: + path = "/admin/ping" + case CbasService: + path = "/admin/ping" + case FtsService: + path = "/api/ping" + case CapiService: + path = "/" + case MgmtService: + path = "" + } + + for { + clientMux := muxer.Get() + if clientMux.revID > -1 { + var epList []string + switch service { + case N1qlService: + epList = clientMux.n1qlEpList + case CbasService: + epList = clientMux.cbasEpList + case FtsService: + epList = clientMux.ftsEpList + case CapiService: + epList = dc.endpointsFromCapiList(clientMux.capiEpList) + case MgmtService: + epList = clientMux.mgmtEpList + } + + connected := uint32(0) + var wg sync.WaitGroup + for _, ep := range epList { + wg.Add(1) + go func(ep string) { + defer wg.Done() + req := 
&httpRequest{ + Service: service, + Method: "GET", + Path: path, + RetryStrategy: retryStrat, + Endpoint: ep, + IsIdempotent: true, + Context: ctx, + UniqueID: uuid.New().String(), + } + resp, err := dc.httpComponent.DoInternalHTTPRequest(req, false) + if err != nil { + if errors.Is(err, context.Canceled) { + return + } + + if desiredState == ClusterStateOnline { + // Cancel this run entirely, we can't satisfy the requirements + cancel() + } + return + } + if resp.StatusCode != 200 { + if desiredState == ClusterStateOnline { + // Cancel this run entirely, we can't satisfy the requirements + cancel() + } + return + } + atomic.AddUint32(&connected, 1) + if desiredState == ClusterStateDegraded { + // Cancel this run entirely, we've successfully satisfied the requirements + cancel() + } + }(ep) + } + + wg.Wait() + + switch desiredState { + case ClusterStateDegraded: + if atomic.LoadUint32(&connected) > 0 { + op.lock.Lock() + op.handledOneLocked() + op.lock.Unlock() + + return + } + case ClusterStateOnline: + if atomic.LoadUint32(&connected) == uint32(len(epList)) { + op.lock.Lock() + op.handledOneLocked() + op.lock.Unlock() + + return + } + default: + // How we got here no-one does know + // But round and round we must go + } + } + + var shouldRetry bool + shouldRetry, until := retryOrchMaybeRetry(op, NotReadyRetryReason) + if !shouldRetry { + op.cancel(errCliInternalError) + return + } + + select { + case <-op.stopCh: + return + case <-time.After(time.Until(until)): + } + } +} + +func (dc *diagnosticsComponent) WaitUntilReady(deadline time.Time, opts WaitUntilReadyOptions, + cb WaitUntilReadyCallback) (PendingOp, error) { + desiredState := opts.DesiredState + if desiredState == ClusterStateOffline { + return nil, wrapError(errInvalidArgument, "cannot use offline as a desired state") + } + + if desiredState == 0 { + desiredState = ClusterStateOnline + } + + serviceTypes := opts.ServiceTypes + if len(serviceTypes) == 0 { + serviceTypes = []ServiceType{MemdService} + } + 
+ retry := opts.RetryStrategy + if retry == nil { + retry = dc.defaultRetry + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + + op := &waitUntilOp{ + remaining: int32(len(serviceTypes)), + stopCh: make(chan struct{}), + callback: cb, + httpCancel: cancelFunc, + retryStrat: retry, + } + + op.lock.Lock() + start := time.Now() + op.timer = time.AfterFunc(deadline.Sub(start), func() { + op.cancel(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "WaitUntilReady", + TimeObserved: time.Since(start), + RetryReasons: op.RetryReasons(), + RetryAttempts: op.RetryAttempts(), + }) + }) + op.lock.Unlock() + + for _, serviceType := range serviceTypes { + switch serviceType { + case MemdService: + go dc.checkKVReady(desiredState, op) + case CapiService: + go dc.checkHTTPReady(ctx, CapiService, desiredState, op) + case N1qlService: + go dc.checkHTTPReady(ctx, N1qlService, desiredState, op) + case FtsService: + go dc.checkHTTPReady(ctx, FtsService, desiredState, op) + case CbasService: + go dc.checkHTTPReady(ctx, CbasService, desiredState, op) + case MgmtService: + go dc.checkHTTPReady(ctx, MgmtService, desiredState, op) + } + } + + return op, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/dyntlsconfig.go b/vendor/github.com/couchbase/gocbcore/v9/dyntlsconfig.go new file mode 100644 index 000000000000..3d2fbb59356e --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/dyntlsconfig.go @@ -0,0 +1,46 @@ +package gocbcore + +import ( + "crypto/tls" + "crypto/x509" + "net" +) + +type dynTLSConfig struct { + BaseConfig *tls.Config + Provider func() *x509.CertPool +} + +func (config dynTLSConfig) Clone() *dynTLSConfig { + return &dynTLSConfig{ + BaseConfig: config.BaseConfig.Clone(), + Provider: config.Provider, + } +} + +func (config dynTLSConfig) MakeForHost(serverName string) (*tls.Config, error) { + newConfig := config.BaseConfig.Clone() + + if config.Provider != nil { + rootCAs := config.Provider() + if rootCAs != nil { + 
newConfig.RootCAs = rootCAs + newConfig.InsecureSkipVerify = false + } else { + newConfig.RootCAs = nil + newConfig.InsecureSkipVerify = true + } + } + + newConfig.ServerName = serverName + return newConfig, nil +} + +func (config dynTLSConfig) MakeForAddr(addr string) (*tls.Config, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return config.MakeForHost(host) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/errmap.go b/vendor/github.com/couchbase/gocbcore/v9/errmap.go new file mode 100644 index 000000000000..2194f29a0ad0 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/errmap.go @@ -0,0 +1,117 @@ +package gocbcore + +import ( + "encoding/json" + "strconv" + "time" +) + +type kvErrorMapAttribute string + +type kvErrorMapRetry struct { + Strategy string + Interval int + After int + Ceil int + MaxDuration int +} + +func (retry kvErrorMapRetry) CalculateRetryDelay(retryCount uint32) time.Duration { + duraCeil := time.Duration(retry.Ceil) * time.Millisecond + + var dura time.Duration + if retryCount == 0 { + dura = time.Duration(retry.After) * time.Millisecond + } else { + interval := time.Duration(retry.Interval) * time.Millisecond + if retry.Strategy == "constant" { + dura = interval + } else if retry.Strategy == "linear" { + dura = interval * time.Duration(retryCount) + } else if retry.Strategy == "exponential" { + dura = interval + for i := uint32(0); i < retryCount-1; i++ { + // Need to multiply by the original value, not the scaled one + dura = dura * time.Duration(retry.Interval) + + // We have to check this here to make sure we do not overflow + if duraCeil > 0 && dura > duraCeil { + dura = duraCeil + break + } + } + } + } + + if duraCeil > 0 && dura > duraCeil { + dura = duraCeil + } + + return dura +} + +type kvErrorMapError struct { + Name string + Description string + Attributes []kvErrorMapAttribute + Retry kvErrorMapRetry +} + +type kvErrorMap struct { + Version int + Revision int + Errors 
map[uint16]kvErrorMapError +} + +type cfgKvErrorMapError struct { + Name string `json:"name"` + Desc string `json:"desc"` + Attrs []string `json:"attrs"` + Retry struct { + Strategy string `json:"strategy"` + Interval int `json:"interval"` + After int `json:"after"` + Ceil int `json:"ceil"` + MaxDuration int `json:"max-duration"` + } `json:"retry"` +} + +type cfgKvErrorMap struct { + Version int `json:"version"` + Revision int `json:"revision"` + Errors map[string]cfgKvErrorMapError +} + +func parseKvErrorMap(data []byte) (*kvErrorMap, error) { + var cfg cfgKvErrorMap + if err := json.Unmarshal(data, &cfg); err != nil { + return nil, err + } + + var errMap kvErrorMap + errMap.Version = cfg.Version + errMap.Revision = cfg.Revision + errMap.Errors = make(map[uint16]kvErrorMapError) + for errCodeStr, errData := range cfg.Errors { + errCode, err := strconv.ParseInt(errCodeStr, 16, 64) + if err != nil { + return nil, err + } + + var errInfo kvErrorMapError + errInfo.Name = errData.Name + errInfo.Description = errData.Desc + errInfo.Attributes = make([]kvErrorMapAttribute, len(errData.Attrs)) + for i, attr := range errData.Attrs { + errInfo.Attributes[i] = kvErrorMapAttribute(attr) + } + errInfo.Retry.Strategy = errData.Retry.Strategy + errInfo.Retry.Interval = errData.Retry.Interval + errInfo.Retry.After = errData.Retry.After + errInfo.Retry.Ceil = errData.Retry.Ceil + errInfo.Retry.MaxDuration = errData.Retry.MaxDuration + errMap.Errors[uint16(errCode)] = errInfo + } + + return &errMap, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/errmapcomponent.go b/vendor/github.com/couchbase/gocbcore/v9/errmapcomponent.go new file mode 100644 index 000000000000..4fbb71bc82f9 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/errmapcomponent.go @@ -0,0 +1,202 @@ +package gocbcore + +import ( + "encoding/json" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type errMapComponent struct { + kvErrorMap kvErrorMapPtr + bucketName string +} + +func 
newErrMapManager(bucketName string) *errMapComponent { + return &errMapComponent{ + bucketName: bucketName, + } +} + +func (errMgr *errMapComponent) getKvErrMapData(code memd.StatusCode) *kvErrorMapError { + errMap := errMgr.kvErrorMap.Get() + if errMap != nil { + if errData, ok := errMap.Errors[uint16(code)]; ok { + return &errData + } + } + return nil +} + +func (errMgr *errMapComponent) StoreErrorMap(mapBytes []byte) { + errMap, err := parseKvErrorMap(mapBytes) + if err != nil { + logDebugf("Failed to parse kv error map (%s)", err) + return + } + + logDebugf("Fetched error map: %+v", errMap) + + // Check if we need to switch the agent itself to a better + // error map revision. + for { + origMap := errMgr.kvErrorMap.Get() + if origMap != nil && errMap.Revision < origMap.Revision { + break + } + + if errMgr.kvErrorMap.Update(origMap, errMap) { + break + } + } +} + +func (errMgr *errMapComponent) ShouldRetry(status memd.StatusCode) bool { + kvErrData := errMgr.getKvErrMapData(status) + if kvErrData != nil { + for _, attr := range kvErrData.Attributes { + if attr == "auto-retry" || attr == "retry-now" || attr == "retry-later" { + return true + } + } + } + + return false +} + +func (errMgr *errMapComponent) EnhanceKvError(err error, resp *memdQResponse, req *memdQRequest) error { + enhErr := &KeyValueError{ + InnerError: err, + } + + if req != nil { + enhErr.BucketName = errMgr.bucketName + enhErr.ScopeName = req.ScopeName + enhErr.CollectionName = req.CollectionName + enhErr.CollectionID = req.CollectionID + + retryCount, reasons := req.Retries() + enhErr.RetryReasons = reasons + enhErr.RetryAttempts = retryCount + + connInfo := req.ConnectionInfo() + enhErr.LastDispatchedTo = connInfo.lastDispatchedTo + enhErr.LastDispatchedFrom = connInfo.lastDispatchedFrom + enhErr.LastConnectionID = connInfo.lastConnectionID + } + + if resp != nil { + enhErr.StatusCode = resp.Status + enhErr.Opaque = resp.Opaque + + errMapData := errMgr.getKvErrMapData(enhErr.StatusCode) + if 
errMapData != nil { + enhErr.ErrorName = errMapData.Name + enhErr.ErrorDescription = errMapData.Description + } + + if memd.DatatypeFlag(resp.Datatype)&memd.DatatypeFlagJSON != 0 { + var enhancedData struct { + Error struct { + Context string `json:"context"` + Ref string `json:"ref"` + } `json:"error"` + } + if parseErr := json.Unmarshal(resp.Value, &enhancedData); parseErr == nil { + enhErr.Context = enhancedData.Error.Context + enhErr.Ref = enhancedData.Error.Ref + } + } + } + + return enhErr +} + +func translateMemdError(err error, req *memdQRequest) error { + switch err { + case ErrMemdInvalidArgs: + return errInvalidArgument + case ErrMemdInternalError: + return errInternalServerFailure + case ErrMemdAccessError: + return errAuthenticationFailure + case ErrMemdAuthError: + return errAuthenticationFailure + case ErrMemdTmpFail: + return errTemporaryFailure + case ErrMemdBusy: + return errTemporaryFailure + case ErrMemdKeyExists: + if req.Command == memd.CmdReplace || (req.Command == memd.CmdDelete && req.Cas != 0) { + return errCasMismatch + } + return errDocumentExists + case ErrMemdCollectionNotFound: + return errCollectionNotFound + case ErrMemdUnknownCommand: + return errUnsupportedOperation + case ErrMemdNotSupported: + return errUnsupportedOperation + + case ErrMemdKeyNotFound: + return errDocumentNotFound + case ErrMemdLocked: + // BUGFIX(brett19): This resolves a bug in the server processing of the LOCKED + // operation where the server will respond with LOCKED rather than a CAS mismatch. 
+ if req.Command == memd.CmdUnlockKey { + return errCasMismatch + } + return errDocumentLocked + case ErrMemdTooBig: + return errValueTooLarge + case ErrMemdSubDocNotJSON: + return errValueNotJSON + case ErrMemdDurabilityInvalidLevel: + return errDurabilityLevelNotAvailable + case ErrMemdDurabilityImpossible: + return errDurabilityImpossible + case ErrMemdSyncWriteAmbiguous: + return errDurabilityAmbiguous + case ErrMemdSyncWriteInProgess: + return errDurableWriteInProgress + case ErrMemdSyncWriteReCommitInProgress: + return errDurableWriteReCommitInProgress + case ErrMemdSubDocPathNotFound: + return errPathNotFound + case ErrMemdSubDocPathInvalid: + return errPathInvalid + case ErrMemdSubDocPathTooBig: + return errPathTooBig + case ErrMemdSubDocDocTooDeep: + return errPathTooDeep + case ErrMemdSubDocValueTooDeep: + return errValueTooDeep + case ErrMemdSubDocCantInsert: + return errValueInvalid + case ErrMemdSubDocNotJSON: + return errDocumentNotJSON + case ErrMemdSubDocBadRange: + return errNumberTooBig + case ErrMemdBadDelta: + return errDeltaInvalid + case ErrMemdSubDocBadDelta: + return errDeltaInvalid + case ErrMemdSubDocPathExists: + return errPathExists + case ErrXattrUnknownMacro: + return errXattrUnknownMacro + case ErrXattrInvalidFlagCombo: + return errXattrInvalidFlagCombo + case ErrXattrInvalidKeyCombo: + return errXattrInvalidKeyCombo + case ErrMemdSubDocXattrUnknownVAttr: + return errXattrUnknownVirtualAttribute + case ErrMemdSubDocXattrCannotModifyVAttr: + return errXattrCannotModifyVirtualAttribute + case ErrXattrInvalidOrder: + return errXattrInvalidOrder + case ErrMemdNotMyVBucket: + return errNotMyVBucket + } + + return err +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/errmapptr.go b/vendor/github.com/couchbase/gocbcore/v9/errmapptr.go new file mode 100644 index 000000000000..b6b60f664caf --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/errmapptr.go @@ -0,0 +1,23 @@ +package gocbcore + +import ( + "sync/atomic" + "unsafe" +) 
+ +type kvErrorMapPtr struct { + data unsafe.Pointer +} + +func (ptr *kvErrorMapPtr) Get() *kvErrorMap { + return (*kvErrorMap)(atomic.LoadPointer(&ptr.data)) +} + +func (ptr *kvErrorMapPtr) Update(old, new *kvErrorMap) bool { + if new == nil { + logErrorf("Attempted to update to nil kvErrorMap") + return false + } + + return atomic.CompareAndSwapPointer(&ptr.data, unsafe.Pointer(old), unsafe.Pointer(new)) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/error.go b/vendor/github.com/couchbase/gocbcore/v9/error.go new file mode 100644 index 000000000000..16474308ecff --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/error.go @@ -0,0 +1,191 @@ +package gocbcore + +import ( + "errors" + "io" +) + +// dwError is a special error used for the purposes of rewrapping +// another error to provide more detailed information inherently +// with the error type itself. Mainly used for timeout. +type dwError struct { + InnerError error + Message string +} + +func (e dwError) Error() string { + return e.Message +} + +func (e dwError) Unwrap() error { + return e.InnerError +} + +var ( + // ErrNoSupportedMechanisms occurs when the server does not support any of the + // authentication methods that the client finds suitable. + ErrNoSupportedMechanisms = errors.New("no supported authentication mechanisms") + + // ErrBadHosts occurs when the list of hosts specified cannot be contacted. + ErrBadHosts = errors.New("failed to connect to any of the specified hosts") + + // ErrProtocol occurs when the server responds with unexpected or unparseable data. + ErrProtocol = errors.New("failed to parse server response") + + // ErrNoReplicas occurs when no replicas respond in time + ErrNoReplicas = errors.New("no replicas responded in time") + + // ErrCliInternalError indicates an internal error occurred within the client. 
+ ErrCliInternalError = errors.New("client internal error") + + // ErrInvalidCredentials is returned when an invalid set of credentials is provided for a service. + ErrInvalidCredentials = errors.New("an invalid set of credentials was provided") + + // ErrInvalidServer occurs when an explicit, but invalid server index is specified. + ErrInvalidServer = errors.New("specific server index is invalid") + + // ErrInvalidVBucket occurs when an explicit, but invalid vbucket index is specified. + ErrInvalidVBucket = errors.New("specific vbucket index is invalid") + + // ErrInvalidReplica occurs when an explicit, but invalid replica index is specified. + ErrInvalidReplica = errors.New("specific server index is invalid") + + // ErrInvalidService occurs when an explicit but invalid service type is specified + ErrInvalidService = errors.New("invalid service") + + // ErrInvalidCertificate occurs when a certificate that is not useable is passed to an Agent. + ErrInvalidCertificate = errors.New("certificate is invalid") + + // ErrCollectionsUnsupported occurs when collections are used but either server does not support them or the agent + // was created without them enabled. + ErrCollectionsUnsupported = errors.New("collections are not enabled") + + // ErrBucketAlreadySelected occurs when SelectBucket is called when a bucket is already selected.. + ErrBucketAlreadySelected = errors.New("bucket already selected") + + // ErrShutdown occurs when operations are performed on a previously closed Agent. + ErrShutdown = errors.New("connection shut down") + + // ErrOverload occurs when too many operations are dispatched and all queues are full. + ErrOverload = errors.New("queue overflowed") + + // ErrSocketClosed occurs when a socket closes while an operation is in flight. + ErrSocketClosed = io.EOF + + // ErrGCCCPInUse occurs when an operation dis performed whilst the client is connect via GCCCP. 
+ ErrGCCCPInUse = errors.New("connected via gcccp, kv operations are not supported, open a bucket first") + + // ErrNotMyVBucket occurs when an operation is sent to a node which does not own the vbucket. + ErrNotMyVBucket = errors.New("not my vbucket") +) + +// Shared Error Definitions RFC#58@15 +var ( + // ErrTimeout occurs when an operation does not receive a response in a timely manner. + ErrTimeout = errors.New("operation has timed out") + + ErrRequestCanceled = errors.New("request canceled") + ErrInvalidArgument = errors.New("invalid argument") + ErrServiceNotAvailable = errors.New("service not available") + ErrInternalServerFailure = errors.New("internal server failure") + ErrAuthenticationFailure = errors.New("authentication failure") + ErrTemporaryFailure = errors.New("temporary failure") + ErrParsingFailure = errors.New("parsing failure") + + ErrCasMismatch = errors.New("cas mismatch") + ErrBucketNotFound = errors.New("bucket not found") + ErrCollectionNotFound = errors.New("collection not found") + ErrEncodingFailure = errors.New("encoding failure") + ErrDecodingFailure = errors.New("decoding failure") + ErrUnsupportedOperation = errors.New("unsupported operation") + ErrAmbiguousTimeout = &dwError{ErrTimeout, "ambiguous timeout"} + + ErrUnambiguousTimeout = &dwError{ErrTimeout, "unambiguous timeout"} + + // ErrFeatureNotAvailable occurs when an operation is performed on a bucket which does not support it. 
+ ErrFeatureNotAvailable = errors.New("feature is not available") + ErrScopeNotFound = errors.New("scope not found") + ErrIndexNotFound = errors.New("index not found") + + ErrIndexExists = errors.New("index exists") +) + +// Key Value Error Definitions RFC#58@15 +var ( + ErrDocumentNotFound = errors.New("document not found") + ErrDocumentUnretrievable = errors.New("document unretrievable") + ErrDocumentLocked = errors.New("document locked") + ErrValueTooLarge = errors.New("value too large") + ErrDocumentExists = errors.New("document exists") + ErrValueNotJSON = errors.New("value not json") + ErrDurabilityLevelNotAvailable = errors.New("durability level not available") + ErrDurabilityImpossible = errors.New("durability impossible") + ErrDurabilityAmbiguous = errors.New("durability ambiguous") + ErrDurableWriteInProgress = errors.New("durable write in progress") + ErrDurableWriteReCommitInProgress = errors.New("durable write recommit in progress") + ErrMutationLost = errors.New("mutation lost") + ErrPathNotFound = errors.New("path not found") + ErrPathMismatch = errors.New("path mismatch") + ErrPathInvalid = errors.New("path invalid") + ErrPathTooBig = errors.New("path too big") + ErrPathTooDeep = errors.New("path too deep") + ErrValueTooDeep = errors.New("value too deep") + ErrValueInvalid = errors.New("value invalid") + ErrDocumentNotJSON = errors.New("document not json") + ErrNumberTooBig = errors.New("number too big") + ErrDeltaInvalid = errors.New("delta invalid") + ErrPathExists = errors.New("path exists") + ErrXattrUnknownMacro = errors.New("xattr unknown macro") + ErrXattrInvalidFlagCombo = errors.New("xattr invalid flag combination") + ErrXattrInvalidKeyCombo = errors.New("xattr invalid key combination") + ErrXattrUnknownVirtualAttribute = errors.New("xattr unknown virtual attribute") + ErrXattrCannotModifyVirtualAttribute = errors.New("xattr cannot modify virtual attribute") + ErrXattrInvalidOrder = errors.New("xattr invalid order") +) + +// Query Error 
Definitions RFC#58@15 +var ( + ErrPlanningFailure = errors.New("planning failure") + + ErrIndexFailure = errors.New("index failure") + + ErrPreparedStatementFailure = errors.New("prepared statement failure") +) + +// Analytics Error Definitions RFC#58@15 +var ( + ErrCompilationFailure = errors.New("compilation failure") + + ErrJobQueueFull = errors.New("job queue full") + + ErrDatasetNotFound = errors.New("dataset not found") + + ErrDataverseNotFound = errors.New("dataverse not found") + + ErrDatasetExists = errors.New("dataset exists") + + ErrDataverseExists = errors.New("dataverse exists") + + ErrLinkNotFound = errors.New("link not found") +) + +// Search Error Definitions RFC#58@15 +var () + +// View Error Definitions RFC#58@15 +var ( + ErrViewNotFound = errors.New("view not found") + + ErrDesignDocumentNotFound = errors.New("design document not found") +) + +// Management Error Definitions RFC#58@15 +var ( + ErrCollectionExists = errors.New("collection exists") + ErrScopeExists = errors.New("scope exists") + ErrUserNotFound = errors.New("user not found") + ErrGroupNotFound = errors.New("group not found") + ErrBucketExists = errors.New("bucket exists") + ErrUserExists = errors.New("user exists") + ErrBucketNotFlushable = errors.New("bucket not flushable") +) diff --git a/vendor/github.com/couchbase/gocbcore/v9/error_dcp.go b/vendor/github.com/couchbase/gocbcore/v9/error_dcp.go new file mode 100644 index 000000000000..40f9fb48a463 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/error_dcp.go @@ -0,0 +1,55 @@ +package gocbcore + +import ( + "errors" + "log" + + "github.com/couchbase/gocbcore/v9/memd" +) + +var streamEndErrorMap = make(map[memd.StreamEndStatus]error) + +func makeStreamEndStatusError(code memd.StreamEndStatus) error { + err := errors.New(code.KVText()) + if streamEndErrorMap[code] != nil { + log.Fatal("error handling setup failure") + } + streamEndErrorMap[code] = err + return err +} + +func getStreamEndStatusError(code 
memd.StreamEndStatus) error { + if code == memd.StreamEndOK { + return nil + } + if err := streamEndErrorMap[code]; err != nil { + return err + } + return errors.New(code.KVText()) +} + +var ( + // ErrDCPStreamClosed occurs when a DCP stream is closed gracefully. + ErrDCPStreamClosed = makeStreamEndStatusError(memd.StreamEndClosed) + + // ErrDCPStreamStateChanged occurs when a DCP stream is interrupted by failover. + ErrDCPStreamStateChanged = makeStreamEndStatusError(memd.StreamEndStateChanged) + + // ErrDCPStreamDisconnected occurs when a DCP stream is disconnected. + ErrDCPStreamDisconnected = makeStreamEndStatusError(memd.StreamEndDisconnected) + + // ErrDCPStreamTooSlow occurs when a DCP stream is cancelled due to the application + // not keeping up with the rate of flow of DCP events sent by the server. + ErrDCPStreamTooSlow = makeStreamEndStatusError(memd.StreamEndTooSlow) + + // ErrDCPBackfillFailed occurs when there was an issue starting the backfill on + // the server e.g. the requested start seqno was behind the purge seqno. + ErrDCPBackfillFailed = makeStreamEndStatusError(memd.StreamEndBackfillFailed) + + // ErrDCPStreamFilterEmpty occurs when all of the collections for a DCP stream are + // dropped. + ErrDCPStreamFilterEmpty = makeStreamEndStatusError(memd.StreamEndFilterEmpty) + + // ErrStreamIDNotEnabled occurs when dcp operations are performed using a stream ID when stream IDs are not enabled. 
+ ErrStreamIDNotEnabled = errors.New("stream IDs have not been enabled on this stream") +) diff --git a/vendor/github.com/couchbase/gocbcore/v9/error_memd.go b/vendor/github.com/couchbase/gocbcore/v9/error_memd.go new file mode 100644 index 000000000000..2bcc0c29f854 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/error_memd.go @@ -0,0 +1,203 @@ +package gocbcore + +import ( + "errors" + "log" + + "github.com/couchbase/gocbcore/v9/memd" +) + +var statusCodeErrorMap = make(map[memd.StatusCode]error) + +func makeKvStatusError(code memd.StatusCode) error { + err := errors.New(code.KVText()) + if statusCodeErrorMap[code] != nil { + log.Fatal("error handling setup failure") + } + statusCodeErrorMap[code] = err + return err +} + +func getKvStatusCodeError(code memd.StatusCode) error { + if err := statusCodeErrorMap[code]; err != nil { + return err + } + return errors.New(code.KVText()) +} + +var ( + // ErrMemdKeyNotFound occurs when an operation is performed on a key that does not exist. + ErrMemdKeyNotFound = makeKvStatusError(memd.StatusKeyNotFound) + + // ErrMemdKeyExists occurs when an operation is performed on a key that could not be found. + ErrMemdKeyExists = makeKvStatusError(memd.StatusKeyExists) + + // ErrMemdTooBig occurs when an operation attempts to store more data in a single document + // than the server is capable of storing (by default, this is a 20MB limit). + ErrMemdTooBig = makeKvStatusError(memd.StatusTooBig) + + // ErrMemdInvalidArgs occurs when the server receives invalid arguments for an operation. + ErrMemdInvalidArgs = makeKvStatusError(memd.StatusInvalidArgs) + + // ErrMemdNotStored occurs when the server fails to store a key. + ErrMemdNotStored = makeKvStatusError(memd.StatusNotStored) + + // ErrMemdBadDelta occurs when an invalid delta value is specified to a counter operation. 
+ ErrMemdBadDelta = makeKvStatusError(memd.StatusBadDelta) + + // ErrMemdNotMyVBucket occurs when an operation is dispatched to a server which is + // non-authoritative for a specific vbucket. + ErrMemdNotMyVBucket = makeKvStatusError(memd.StatusNotMyVBucket) + + // ErrMemdNoBucket occurs when no bucket was selected on a connection. + ErrMemdNoBucket = makeKvStatusError(memd.StatusNoBucket) + + // ErrMemdLocked occurs when a document is already locked. + ErrMemdLocked = makeKvStatusError(memd.StatusLocked) + + // ErrMemdAuthStale occurs when authentication credentials have become invalidated. + ErrMemdAuthStale = makeKvStatusError(memd.StatusAuthStale) + + // ErrMemdAuthError occurs when the authentication information provided was not valid. + ErrMemdAuthError = makeKvStatusError(memd.StatusAuthError) + + // ErrMemdAuthContinue occurs in multi-step authentication when more authentication + // work needs to be performed in order to complete the authentication process. + ErrMemdAuthContinue = makeKvStatusError(memd.StatusAuthContinue) + + // ErrMemdRangeError occurs when the range specified to the server is not valid. + ErrMemdRangeError = makeKvStatusError(memd.StatusRangeError) + + // ErrMemdRollback occurs when a DCP stream fails to open due to a rollback having + // previously occurred since the last time the stream was opened. + ErrMemdRollback = makeKvStatusError(memd.StatusRollback) + + // ErrMemdAccessError occurs when an access error occurs. + ErrMemdAccessError = makeKvStatusError(memd.StatusAccessError) + + // ErrMemdNotInitialized is sent by servers which are still initializing, and are not + // yet ready to accept operations on behalf of a particular bucket. + ErrMemdNotInitialized = makeKvStatusError(memd.StatusNotInitialized) + + // ErrMemdUnknownCommand occurs when an unknown operation is sent to a server. 
+ ErrMemdUnknownCommand = makeKvStatusError(memd.StatusUnknownCommand) + + // ErrMemdOutOfMemory occurs when the server cannot service a request due to memory + // limitations. + ErrMemdOutOfMemory = makeKvStatusError(memd.StatusOutOfMemory) + + // ErrMemdNotSupported occurs when an operation is understood by the server, but that + // operation is not supported on this server (occurs for a variety of reasons). + ErrMemdNotSupported = makeKvStatusError(memd.StatusNotSupported) + + // ErrMemdInternalError occurs when internal errors prevent the server from processing + // your request. + ErrMemdInternalError = makeKvStatusError(memd.StatusInternalError) + + // ErrMemdBusy occurs when the server is too busy to process your request right away. + // Attempting the operation at a later time will likely succeed. + ErrMemdBusy = makeKvStatusError(memd.StatusBusy) + + // ErrMemdTmpFail occurs when a temporary failure is preventing the server from + // processing your request. + ErrMemdTmpFail = makeKvStatusError(memd.StatusTmpFail) + + // ErrMemdCollectionNotFound occurs when a Collection cannot be found. + ErrMemdCollectionNotFound = makeKvStatusError(memd.StatusCollectionUnknown) + + // ErrMemdScopeNotFound occurs when a Collection cannot be found. + ErrMemdScopeNotFound = makeKvStatusError(memd.StatusScopeUnknown) + + // ErrMemdDurabilityInvalidLevel occurs when an invalid durability level was requested. + ErrMemdDurabilityInvalidLevel = makeKvStatusError(memd.StatusDurabilityInvalidLevel) + + // ErrMemdDurabilityImpossible occurs when a request is performed with impossible + // durability level requirements. + ErrMemdDurabilityImpossible = makeKvStatusError(memd.StatusDurabilityImpossible) + + // ErrMemdSyncWriteInProgess occurs when an attempt is made to write to a key that has + // a SyncWrite pending. 
+ ErrMemdSyncWriteInProgess = makeKvStatusError(memd.StatusSyncWriteInProgress) + + // ErrMemdSyncWriteAmbiguous occurs when an SyncWrite does not complete in the specified + // time and the result is ambiguous. + ErrMemdSyncWriteAmbiguous = makeKvStatusError(memd.StatusSyncWriteAmbiguous) + + // ErrMemdSyncWriteReCommitInProgress occurs when an SyncWrite is being recommitted. + ErrMemdSyncWriteReCommitInProgress = makeKvStatusError(memd.StatusSyncWriteReCommitInProgress) + + // ErrMemdSubDocPathNotFound occurs when a sub-document operation targets a path + // which does not exist in the specifie document. + ErrMemdSubDocPathNotFound = makeKvStatusError(memd.StatusSubDocPathNotFound) + + // ErrMemdSubDocPathMismatch occurs when a sub-document operation specifies a path + // which does not match the document structure (field access on an array). + ErrMemdSubDocPathMismatch = makeKvStatusError(memd.StatusSubDocPathMismatch) + + // ErrMemdSubDocPathInvalid occurs when a sub-document path could not be parsed. + ErrMemdSubDocPathInvalid = makeKvStatusError(memd.StatusSubDocPathInvalid) + + // ErrMemdSubDocPathTooBig occurs when a sub-document path is too big. + ErrMemdSubDocPathTooBig = makeKvStatusError(memd.StatusSubDocPathTooBig) + + // ErrMemdSubDocDocTooDeep occurs when an operation would cause a document to be + // nested beyond the depth limits allowed by the sub-document specification. + ErrMemdSubDocDocTooDeep = makeKvStatusError(memd.StatusSubDocDocTooDeep) + + // ErrMemdSubDocCantInsert occurs when a sub-document operation could not insert. + ErrMemdSubDocCantInsert = makeKvStatusError(memd.StatusSubDocCantInsert) + + // ErrMemdSubDocNotJSON occurs when a sub-document operation is performed on a + // document which is not JSON. + ErrMemdSubDocNotJSON = makeKvStatusError(memd.StatusSubDocNotJSON) + + // ErrMemdSubDocBadRange occurs when a sub-document operation is performed with + // a bad range. 
+ ErrMemdSubDocBadRange = makeKvStatusError(memd.StatusSubDocBadRange) + + // ErrMemdSubDocBadDelta occurs when a sub-document counter operation is performed + // and the specified delta is not valid. + ErrMemdSubDocBadDelta = makeKvStatusError(memd.StatusSubDocBadDelta) + + // ErrMemdSubDocPathExists occurs when a sub-document operation expects a path not + // to exists, but the path was found in the document. + ErrMemdSubDocPathExists = makeKvStatusError(memd.StatusSubDocPathExists) + + // ErrMemdSubDocValueTooDeep occurs when a sub-document operation specifies a value + // which is deeper than the depth limits of the sub-document specification. + ErrMemdSubDocValueTooDeep = makeKvStatusError(memd.StatusSubDocValueTooDeep) + + // ErrMemdSubDocBadCombo occurs when a multi-operation sub-document operation is + // performed and operations within the package of ops conflict with each other. + ErrMemdSubDocBadCombo = makeKvStatusError(memd.StatusSubDocBadCombo) + + // ErrMemdSubDocBadMulti occurs when a multi-operation sub-document operation is + // performed and operations within the package of ops conflict with each other. + ErrMemdSubDocBadMulti = makeKvStatusError(memd.StatusSubDocBadMulti) + + // ErrMemdSubDocSuccessDeleted occurs when a multi-operation sub-document operation + // is performed on a soft-deleted document. + ErrMemdSubDocSuccessDeleted = makeKvStatusError(memd.StatusSubDocSuccessDeleted) + + // ErrMemdSubDocXattrInvalidFlagCombo occurs when an invalid set of + // extended-attribute flags is passed to a sub-document operation. + ErrMemdSubDocXattrInvalidFlagCombo = makeKvStatusError(memd.StatusSubDocXattrInvalidFlagCombo) + + // ErrMemdSubDocXattrInvalidKeyCombo occurs when an invalid set of key operations + // are specified for a extended-attribute sub-document operation. 
+ ErrMemdSubDocXattrInvalidKeyCombo = makeKvStatusError(memd.StatusSubDocXattrInvalidKeyCombo) + + // ErrMemdSubDocXattrUnknownMacro occurs when an invalid macro value is specified. + ErrMemdSubDocXattrUnknownMacro = makeKvStatusError(memd.StatusSubDocXattrUnknownMacro) + + // ErrMemdSubDocXattrUnknownVAttr occurs when an invalid virtual attribute is specified. + ErrMemdSubDocXattrUnknownVAttr = makeKvStatusError(memd.StatusSubDocXattrUnknownVAttr) + + // ErrMemdSubDocXattrCannotModifyVAttr occurs when a mutation is attempted upon + // a virtual attribute (which are immutable by definition). + ErrMemdSubDocXattrCannotModifyVAttr = makeKvStatusError(memd.StatusSubDocXattrCannotModifyVAttr) + + // ErrMemdSubDocMultiPathFailureDeleted occurs when a Multi Path Failure occurs on + // a soft-deleted document. + ErrMemdSubDocMultiPathFailureDeleted = makeKvStatusError(memd.StatusSubDocMultiPathFailureDeleted) +) diff --git a/vendor/github.com/couchbase/gocbcore/v9/errors_internal.go b/vendor/github.com/couchbase/gocbcore/v9/errors_internal.go new file mode 100644 index 000000000000..ba83e5ed11f8 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/errors_internal.go @@ -0,0 +1,402 @@ +package gocbcore + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type wrappedError struct { + Message string + InnerError error +} + +func (e wrappedError) Error() string { + return fmt.Sprintf("%s: %s", e.Message, e.InnerError.Error()) +} + +func (e wrappedError) Unwrap() error { + return e.InnerError +} + +func wrapError(err error, message string) error { + return wrappedError{ + Message: message, + InnerError: err, + } +} + +// SubDocumentError provides additional contextual information to +// sub-document specific errors. InnerError is always a KeyValueError. +type SubDocumentError struct { + InnerError error + Index int +} + +// Error returns the string representation of this error. 
+func (err SubDocumentError) Error() string { + return fmt.Sprintf("sub-document error at index %d: %s", + err.Index, + err.InnerError.Error()) +} + +// Unwrap returns the underlying error for the operation failing. +func (err SubDocumentError) Unwrap() error { + return err.InnerError +} + +func serializeError(err error) string { + errBytes, serErr := json.Marshal(err) + if serErr != nil { + logErrorf("failed to serialize error to json: %s", serErr.Error()) + } + return string(errBytes) +} + +// KeyValueError wraps key-value errors that occur within the SDK. +type KeyValueError struct { + InnerError error `json:"-"` + StatusCode memd.StatusCode `json:"status_code,omitempty"` + BucketName string `json:"bucket,omitempty"` + ScopeName string `json:"scope,omitempty"` + CollectionName string `json:"collection,omitempty"` + CollectionID uint32 `json:"collection_id,omitempty"` + ErrorName string `json:"error_name,omitempty"` + ErrorDescription string `json:"error_description,omitempty"` + Opaque uint32 `json:"opaque,omitempty"` + Context string `json:"context,omitempty"` + Ref string `json:"ref,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` + LastDispatchedTo string `json:"last_dispatched_to,omitempty"` + LastDispatchedFrom string `json:"last_dispatched_from,omitempty"` + LastConnectionID string `json:"last_connection_id,omitempty"` +} + +// Error returns the string representation of this error. +func (e KeyValueError) Error() string { + return e.InnerError.Error() + " | " + serializeError(e) +} + +// Unwrap returns the underlying reason for the error +func (e KeyValueError) Unwrap() error { + return e.InnerError +} + +// ViewQueryErrorDesc represents specific view error data. +type ViewQueryErrorDesc struct { + SourceNode string + Message string +} + +// ViewError represents an error returned from a view query. 
+type ViewError struct { + InnerError error `json:"-"` + DesignDocumentName string `json:"design_document_name,omitempty"` + ViewName string `json:"view_name,omitempty"` + Errors []ViewQueryErrorDesc `json:"errors,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. +func (e ViewError) Error() string { + return e.InnerError.Error() + " | " + serializeError(e) +} + +// Unwrap returns the underlying reason for the error +func (e ViewError) Unwrap() error { + return e.InnerError +} + +// N1QLErrorDesc represents specific n1ql error data. +type N1QLErrorDesc struct { + Code uint32 + Message string +} + +// N1QLError represents an error returned from a n1ql query. +type N1QLError struct { + InnerError error `json:"-"` + Statement string `json:"statement,omitempty"` + ClientContextID string `json:"client_context_id,omitempty"` + Errors []N1QLErrorDesc `json:"errors,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. +func (e N1QLError) Error() string { + return e.InnerError.Error() + " | " + serializeError(e) +} + +// Unwrap returns the underlying reason for the error +func (e N1QLError) Unwrap() error { + return e.InnerError +} + +// AnalyticsErrorDesc represents specific analytics error data. +type AnalyticsErrorDesc struct { + Code uint32 + Message string +} + +// AnalyticsError represents an error returned from an analytics query. 
+type AnalyticsError struct { + InnerError error `json:"-"` + Statement string `json:"statement,omitempty"` + ClientContextID string `json:"client_context_id,omitempty"` + Errors []AnalyticsErrorDesc `json:"errors,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. +func (e AnalyticsError) Error() string { + return e.InnerError.Error() + " | " + serializeError(e) +} + +// Unwrap returns the underlying reason for the error +func (e AnalyticsError) Unwrap() error { + return e.InnerError +} + +// SearchError represents an error returned from a search query. +type SearchError struct { + InnerError error `json:"-"` + IndexName string `json:"index_name,omitempty"` + Query interface{} `json:"query,omitempty"` + ErrorText string `json:"error_text"` + HTTPResponseCode int `json:"status_code,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. +func (e SearchError) Error() string { + return e.InnerError.Error() + " | " + serializeError(e) +} + +// Unwrap returns the underlying reason for the error +func (e SearchError) Unwrap() error { + return e.InnerError +} + +// HTTPError represents an error returned from an HTTP request. +type HTTPError struct { + InnerError error `json:"-"` + UniqueID string `json:"unique_id,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + RetryReasons []RetryReason `json:"retry_reasons,omitempty"` + RetryAttempts uint32 `json:"retry_attempts,omitempty"` +} + +// Error returns the string representation of this error. 
+func (e HTTPError) Error() string { + return e.InnerError.Error() + " | " + serializeError(e) +} + +// Unwrap returns the underlying reason for the error +func (e HTTPError) Unwrap() error { + return e.InnerError +} + +// TimeoutError wraps timeout errors that occur within the SDK. +type TimeoutError struct { + InnerError error + OperationID string + Opaque string + TimeObserved time.Duration + RetryReasons []RetryReason + RetryAttempts uint32 + LastDispatchedTo string + LastDispatchedFrom string + LastConnectionID string +} + +type timeoutError struct { + InnerError error `json:"-"` + OperationID string `json:"s,omitempty"` + Opaque string `json:"i,omitempty"` + TimeObserved uint64 `json:"t,omitempty"` + RetryReasons []RetryReason `json:"rr,omitempty"` + RetryAttempts uint32 `json:"ra,omitempty"` + LastDispatchedTo string `json:"r,omitempty"` + LastDispatchedFrom string `json:"l,omitempty"` + LastConnectionID string `json:"c,omitempty"` +} + +// MarshalJSON implements the Marshaler interface. +func (err *TimeoutError) MarshalJSON() ([]byte, error) { + toMarshal := timeoutError{ + InnerError: err.InnerError, + OperationID: err.OperationID, + Opaque: err.Opaque, + TimeObserved: uint64(err.TimeObserved / time.Microsecond), + RetryReasons: err.RetryReasons, + RetryAttempts: err.RetryAttempts, + LastDispatchedTo: err.LastDispatchedTo, + LastDispatchedFrom: err.LastDispatchedFrom, + LastConnectionID: err.LastConnectionID, + } + + return json.Marshal(toMarshal) +} + +// UnmarshalJSON implements the Unmarshaler interface. 
+func (err *TimeoutError) UnmarshalJSON(data []byte) error { + var tErr timeoutError + if jErr := json.Unmarshal(data, &tErr); jErr != nil { + return jErr + } + + duration := time.Duration(tErr.TimeObserved) * time.Microsecond + + err.InnerError = tErr.InnerError + err.OperationID = tErr.OperationID + err.Opaque = tErr.Opaque + err.TimeObserved = duration + err.RetryReasons = tErr.RetryReasons + err.RetryAttempts = tErr.RetryAttempts + err.LastDispatchedTo = tErr.LastDispatchedTo + err.LastDispatchedFrom = tErr.LastDispatchedFrom + err.LastConnectionID = tErr.LastConnectionID + + return nil +} + +func (err TimeoutError) Error() string { + return err.InnerError.Error() + " | " + serializeError(err) +} + +// Unwrap returns the underlying reason for the error +func (err TimeoutError) Unwrap() error { + return err.InnerError +} + +// ncError is a wrapper error that provides no additional context to one of the +// publicly exposed error types. This is to force people to correctly use the +// error handling behaviours to check the error, rather than direct compares. +type ncError struct { + InnerError error +} + +func (err ncError) Error() string { + return err.InnerError.Error() +} + +func (err ncError) Unwrap() error { + return err.InnerError +} + +func isErrorStatus(err error, code memd.StatusCode) bool { + var kvErr *KeyValueError + if errors.As(err, &kvErr) { + return kvErr.StatusCode == code + } + return false +} + +var ( + // errCircuitBreakerOpen is passed around internally to signal that an + // operation was cancelled due to the circuit breaker being open. + errCircuitBreakerOpen = errors.New("circuit breaker open") + errNoCCCPHosts = errors.New("no cccp hosts available") +) + +// This list contains protected versions of all the errors we throw +// to ensure no users inadvertently rely on direct comparisons. 
+// nolint: deadcode,varcheck +var ( + errTimeout = ncError{ErrTimeout} + errRequestCanceled = ncError{ErrRequestCanceled} + errInvalidArgument = ncError{ErrInvalidArgument} + errServiceNotAvailable = ncError{ErrServiceNotAvailable} + errInternalServerFailure = ncError{ErrInternalServerFailure} + errAuthenticationFailure = ncError{ErrAuthenticationFailure} + errTemporaryFailure = ncError{ErrTemporaryFailure} + errParsingFailure = ncError{ErrParsingFailure} + errCasMismatch = ncError{ErrCasMismatch} + errBucketNotFound = ncError{ErrBucketNotFound} + errCollectionNotFound = ncError{ErrCollectionNotFound} + errEncodingFailure = ncError{ErrEncodingFailure} + errDecodingFailure = ncError{ErrDecodingFailure} + errUnsupportedOperation = ncError{ErrUnsupportedOperation} + errAmbiguousTimeout = ncError{ErrAmbiguousTimeout} + errUnambiguousTimeout = ncError{ErrUnambiguousTimeout} + errFeatureNotAvailable = ncError{ErrFeatureNotAvailable} + errScopeNotFound = ncError{ErrScopeNotFound} + errIndexNotFound = ncError{ErrIndexNotFound} + errIndexExists = ncError{ErrIndexExists} + errGCCCPInUse = ncError{ErrGCCCPInUse} + errNotMyVBucket = ncError{ErrNotMyVBucket} + + errDocumentNotFound = ncError{ErrDocumentNotFound} + errDocumentUnretrievable = ncError{ErrDocumentUnretrievable} + errDocumentLocked = ncError{ErrDocumentLocked} + errValueTooLarge = ncError{ErrValueTooLarge} + errDocumentExists = ncError{ErrDocumentExists} + errValueNotJSON = ncError{ErrValueNotJSON} + errDurabilityLevelNotAvailable = ncError{ErrDurabilityLevelNotAvailable} + errDurabilityImpossible = ncError{ErrDurabilityImpossible} + errDurabilityAmbiguous = ncError{ErrDurabilityAmbiguous} + errDurableWriteInProgress = ncError{ErrDurableWriteInProgress} + errDurableWriteReCommitInProgress = ncError{ErrDurableWriteReCommitInProgress} + errMutationLost = ncError{ErrMutationLost} + errPathNotFound = ncError{ErrPathNotFound} + errPathMismatch = ncError{ErrPathMismatch} + errPathInvalid = ncError{ErrPathInvalid} + 
errPathTooBig = ncError{ErrPathTooBig} + errPathTooDeep = ncError{ErrPathTooDeep} + errValueTooDeep = ncError{ErrValueTooDeep} + errValueInvalid = ncError{ErrValueInvalid} + errDocumentNotJSON = ncError{ErrDocumentNotJSON} + errNumberTooBig = ncError{ErrNumberTooBig} + errDeltaInvalid = ncError{ErrDeltaInvalid} + errPathExists = ncError{ErrPathExists} + errXattrUnknownMacro = ncError{ErrXattrUnknownMacro} + errXattrInvalidFlagCombo = ncError{ErrXattrInvalidFlagCombo} + errXattrInvalidKeyCombo = ncError{ErrXattrInvalidKeyCombo} + errXattrUnknownVirtualAttribute = ncError{ErrXattrUnknownVirtualAttribute} + errXattrCannotModifyVirtualAttribute = ncError{ErrXattrCannotModifyVirtualAttribute} + errXattrInvalidOrder = ncError{ErrXattrInvalidOrder} + + errPlanningFailure = ncError{ErrPlanningFailure} + errIndexFailure = ncError{ErrIndexFailure} + errPreparedStatementFailure = ncError{ErrPreparedStatementFailure} + + errCompilationFailure = ncError{ErrCompilationFailure} + errJobQueueFull = ncError{ErrJobQueueFull} + errDatasetNotFound = ncError{ErrDatasetNotFound} + errDataverseNotFound = ncError{ErrDataverseNotFound} + errDatasetExists = ncError{ErrDatasetExists} + errDataverseExists = ncError{ErrDataverseExists} + errLinkNotFound = ncError{ErrLinkNotFound} + + errViewNotFound = ncError{ErrViewNotFound} + errDesignDocumentNotFound = ncError{ErrDesignDocumentNotFound} + + errNoSupportedMechanisms = ncError{ErrNoSupportedMechanisms} + errBadHosts = ncError{ErrBadHosts} + errProtocol = ncError{ErrProtocol} + errNoReplicas = ncError{ErrNoReplicas} + errCliInternalError = ncError{ErrCliInternalError} + errInvalidCredentials = ncError{ErrInvalidCredentials} + errInvalidServer = ncError{ErrInvalidServer} + errInvalidVBucket = ncError{ErrInvalidVBucket} + errInvalidReplica = ncError{ErrInvalidReplica} + errInvalidService = ncError{ErrInvalidService} + errInvalidCertificate = ncError{ErrInvalidCertificate} + errCollectionsUnsupported = ncError{ErrCollectionsUnsupported} + 
errBucketAlreadySelected = ncError{ErrBucketAlreadySelected} + errShutdown = ncError{ErrShutdown} + errOverload = ncError{ErrOverload} + errStreamIDNotEnabled = ncError{ErrStreamIDNotEnabled} +) diff --git a/vendor/github.com/couchbase/gocbcore/v9/go.mod b/vendor/github.com/couchbase/gocbcore/v9/go.mod new file mode 100644 index 000000000000..60793e383fdc --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/go.mod @@ -0,0 +1,9 @@ +module github.com/couchbase/gocbcore/v9 + +require ( + github.com/golang/snappy v0.0.1 + github.com/google/uuid v1.1.1 + github.com/stretchr/testify v1.5.1 +) + +go 1.13 diff --git a/vendor/github.com/couchbase/gocbcore/v9/go.sum b/vendor/github.com/couchbase/gocbcore/v9/go.sum new file mode 100644 index 000000000000..3030c4cac54f --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/go.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/couchbase/gocbcore/v9/http.go b/vendor/github.com/couchbase/gocbcore/v9/http.go new file mode 100644 index 000000000000..df7a289340f0 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/http.go @@ -0,0 +1,118 @@ +package gocbcore + +import ( + "context" + "errors" + "io" + "sort" + "sync/atomic" + "time" +) + +type httpRequest struct { + Service ServiceType + Endpoint string + Method string + Path string + Username string + Password string + Headers map[string]string + ContentType string + Body []byte + IsIdempotent bool + UniqueID string + Deadline time.Time + RetryStrategy RetryStrategy + RootTraceContext RequestSpanContext + // Whilst the http component will handle deadlines itself this context can be use from places like Ping which + // need to also be able to cancel the context for other reasons. 
+ Context context.Context + CancelFunc context.CancelFunc + + retryCount uint32 + retryReasons []RetryReason +} + +func (hr *httpRequest) retryStrategy() RetryStrategy { + return hr.RetryStrategy +} + +func (hr *httpRequest) Cancel() { + if hr.CancelFunc != nil { + hr.CancelFunc() + } +} + +func (hr *httpRequest) RetryAttempts() uint32 { + return atomic.LoadUint32(&hr.retryCount) +} + +func (hr *httpRequest) Identifier() string { + return hr.UniqueID +} + +func (hr *httpRequest) Idempotent() bool { + return hr.IsIdempotent +} + +func (hr *httpRequest) RetryReasons() []RetryReason { + return hr.retryReasons +} + +func (hr *httpRequest) recordRetryAttempt(reason RetryReason) { + atomic.AddUint32(&hr.retryCount, 1) + idx := sort.Search(len(hr.retryReasons), func(i int) bool { + return hr.retryReasons[i] == reason + }) + + // if idx is out of the range of retryReasons then it wasn't found. + if idx > len(hr.retryReasons)-1 { + hr.retryReasons = append(hr.retryReasons, reason) + } +} + +// HTTPRequest contains the description of an HTTP request to perform. +type HTTPRequest struct { + Service ServiceType + Method string + Endpoint string + Path string + Username string + Password string + Body []byte + Headers map[string]string + ContentType string + IsIdempotent bool + UniqueID string + Deadline time.Time + RetryStrategy RetryStrategy + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// HTTPResponse encapsulates the response from an HTTP request. 
+type HTTPResponse struct { + Endpoint string + StatusCode int + Body io.ReadCloser +} + +func wrapHTTPError(req *httpRequest, err error) HTTPError { + if err == nil { + err = errors.New("http error") + } + + ierr := HTTPError{ + InnerError: err, + } + + if req != nil { + ierr.Endpoint = req.Endpoint + ierr.UniqueID = req.UniqueID + ierr.RetryAttempts = req.RetryAttempts() + ierr.RetryReasons = req.RetryReasons() + } + + return ierr +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/httpcfgcontroller.go b/vendor/github.com/couchbase/gocbcore/v9/httpcfgcontroller.go new file mode 100644 index 000000000000..0a64e43cd0e9 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/httpcfgcontroller.go @@ -0,0 +1,290 @@ +package gocbcore + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/google/uuid" +) + +type configStreamBlock struct { + Bytes []byte +} + +func (i *configStreamBlock) UnmarshalJSON(data []byte) error { + i.Bytes = make([]byte, len(data)) + copy(i.Bytes, data) + return nil +} + +func hostnameFromURI(uri string) string { + uriInfo, err := url.Parse(uri) + if err != nil { + return uri + } + + hostname, err := hostFromHostPort(uriInfo.Host) + if err != nil { + return uri + } + + return hostname +} + +type httpConfigController struct { + muxer *httpMux + cfgMgr *configManagementComponent + confHTTPRetryDelay time.Duration + confHTTPRedialPeriod time.Duration + httpComponent *httpComponent + bucketName string + + looperStopSig chan struct{} + looperDoneSig chan struct{} + + fetchErr error + errLock sync.Mutex +} + +type httpPollerProperties struct { + confHTTPRetryDelay time.Duration + confHTTPRedialPeriod time.Duration + httpComponent *httpComponent +} + +func newHTTPConfigController(bucketName string, props httpPollerProperties, muxer *httpMux, + cfgMgr *configManagementComponent) *httpConfigController { + return &httpConfigController{ + muxer: muxer, + cfgMgr: cfgMgr, + confHTTPRedialPeriod: 
props.confHTTPRedialPeriod, + confHTTPRetryDelay: props.confHTTPRetryDelay, + httpComponent: props.httpComponent, + bucketName: bucketName, + + looperStopSig: make(chan struct{}), + looperDoneSig: make(chan struct{}), + } +} + +func (hcc *httpConfigController) Error() error { + hcc.errLock.Lock() + defer hcc.errLock.Unlock() + return hcc.fetchErr +} + +func (hcc *httpConfigController) setError(err error) { + hcc.errLock.Lock() + hcc.fetchErr = err + hcc.errLock.Unlock() +} + +func (hcc *httpConfigController) Pause(paused bool) { +} + +func (hcc *httpConfigController) Done() chan struct{} { + return hcc.looperDoneSig +} + +func (hcc *httpConfigController) Stop() { + close(hcc.looperStopSig) +} + +func (hcc *httpConfigController) Reset() { + hcc.looperStopSig = make(chan struct{}) + hcc.looperDoneSig = make(chan struct{}) +} + +func (hcc *httpConfigController) DoLoop() { + waitPeriod := hcc.confHTTPRetryDelay + maxConnPeriod := hcc.confHTTPRedialPeriod + + var iterNum uint64 = 1 + iterSawConfig := false + seenNodes := make(map[string]uint64) + + logDebugf("HTTP Looper starting.") + +Looper: + for { + select { + case <-hcc.looperStopSig: + break Looper + default: + } + + var pickedSrv string + for _, srv := range hcc.muxer.MgmtEps() { + if seenNodes[srv] >= iterNum { + continue + } + pickedSrv = srv + break + } + + if pickedSrv == "" { + logDebugf("Pick Failed.") + // All servers have been visited during this iteration + + if !iterSawConfig { + logDebugf("Looper waiting...") + // Wait for a period before trying again if there was a problem... + // We also watch for the client being shut down. 
+ select { + case <-hcc.looperStopSig: + break Looper + case <-time.After(waitPeriod): + } + } + logDebugf("Looping again.") + // Go to next iteration and try all servers again + iterNum++ + iterSawConfig = false + continue + } + + logDebugf("Http Picked: %s.", pickedSrv) + + seenNodes[pickedSrv] = iterNum + + hostname := hostnameFromURI(pickedSrv) + logDebugf("HTTP Hostname: %s.", hostname) + + var resp *HTTPResponse + // 1 on success, 0 on failure for node, -1 for generic failure + var doConfigRequest func(bool) int + + doConfigRequest = func(is2x bool) int { + streamPath := "bs" + if is2x { + streamPath = "bucketsStreaming" + } + // HTTP request time! + uri := fmt.Sprintf("/pools/default/%s/%s", streamPath, hcc.bucketName) + logDebugf("Requesting config from: %s/%s.", pickedSrv, uri) + + req := &httpRequest{ + Service: MgmtService, + Method: "GET", + Path: uri, + Endpoint: pickedSrv, + UniqueID: uuid.New().String(), + } + + var err error + resp, err = hcc.httpComponent.DoInternalHTTPRequest(req, true) + if err != nil { + logDebugf("Failed to connect to host. 
%v", err) + hcc.setError(err) + return 0 + } + + if resp.StatusCode != 200 { + err := resp.Body.Close() + if err != nil { + logErrorf("Socket close failed handling status code != 200 (%s)", err) + } + if resp.StatusCode == 401 { + logDebugf("Failed to connect to host, bad auth.") + hcc.setError(errAuthenticationFailure) + return -1 + } else if resp.StatusCode == 404 { + if is2x { + logDebugf("Failed to connect to host, bad bucket.") + hcc.setError(errAuthenticationFailure) + return -1 + } + + return doConfigRequest(true) + } + logDebugf("Failed to connect to host, unexpected status code: %v.", resp.StatusCode) + hcc.setError(errCliInternalError) + return 0 + } + hcc.setError(nil) + return 1 + } + + switch doConfigRequest(false) { + case 0: + continue + case -1: + continue + } + + logDebugf("Connected.") + + var autoDisconnected int32 + + // Autodisconnect eventually + go func() { + select { + case <-time.After(maxConnPeriod): + case <-hcc.looperStopSig: + } + + logDebugf("Automatically resetting our HTTP connection") + + atomic.StoreInt32(&autoDisconnected, 1) + + err := resp.Body.Close() + if err != nil { + logErrorf("Socket close failed during auto-dc (%s)", err) + } + }() + + dec := json.NewDecoder(resp.Body) + configBlock := new(configStreamBlock) + for { + err := dec.Decode(configBlock) + if err != nil { + if atomic.LoadInt32(&autoDisconnected) == 1 { + // If we know we intentionally disconnected, we know we do not + // need to close the client, nor log an error, since this was + // expected behaviour + break + } + + logWarnf("Config block decode failure (%s)", err) + + if err != io.EOF { + err = resp.Body.Close() + if err != nil { + logErrorf("Socket close failed after decode fail (%s)", err) + } + } + + break + } + + logDebugf("Got Block: %v", string(configBlock.Bytes)) + + bkCfg, err := parseConfig(configBlock.Bytes, hostname) + if err != nil { + logDebugf("Got error while parsing config: %v", err) + + err = resp.Body.Close() + if err != nil { + 
logErrorf("Socket close failed after parsing fail (%s)", err) + } + + break + } + + logDebugf("Got Config.") + + iterSawConfig = true + logDebugf("HTTP Config Update") + hcc.cfgMgr.OnNewConfig(bkCfg) + } + + logDebugf("HTTP, Setting %s to iter %d", pickedSrv, iterNum) + } + + close(hcc.looperDoneSig) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/httpclientmux.go b/vendor/github.com/couchbase/gocbcore/v9/httpclientmux.go new file mode 100644 index 000000000000..90193ccbb922 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/httpclientmux.go @@ -0,0 +1,27 @@ +package gocbcore + +type httpClientMux struct { + capiEpList []string + mgmtEpList []string + n1qlEpList []string + ftsEpList []string + cbasEpList []string + + uuid string + revID int64 + breakerCfg CircuitBreakerConfig +} + +func newHTTPClientMux(cfg *routeConfig, breakerCfg CircuitBreakerConfig) *httpClientMux { + return &httpClientMux{ + capiEpList: cfg.capiEpList, + mgmtEpList: cfg.mgmtEpList, + n1qlEpList: cfg.n1qlEpList, + ftsEpList: cfg.ftsEpList, + cbasEpList: cfg.cbasEpList, + + uuid: cfg.uuid, + revID: cfg.revID, + breakerCfg: breakerCfg, + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/httpcomponent.go b/vendor/github.com/couchbase/gocbcore/v9/httpcomponent.go new file mode 100644 index 000000000000..939e20e961cd --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/httpcomponent.go @@ -0,0 +1,416 @@ +package gocbcore + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "io/ioutil" + "math/rand" + "net/http" + "sync/atomic" + "time" + + "github.com/google/uuid" +) + +type httpComponentInterface interface { + DoInternalHTTPRequest(req *httpRequest, skipConfigCheck bool) (*HTTPResponse, error) +} + +type httpComponent struct { + cli *http.Client + muxer *httpMux + auth AuthProvider + userAgent string + tracer *tracerComponent + defaultRetryStrategy RetryStrategy +} + +type httpComponentProps struct { + UserAgent string + DefaultRetryStrategy 
RetryStrategy +} + +func newHTTPComponent(props httpComponentProps, cli *http.Client, muxer *httpMux, auth AuthProvider, + tracer *tracerComponent) *httpComponent { + return &httpComponent{ + cli: cli, + muxer: muxer, + auth: auth, + userAgent: props.UserAgent, + defaultRetryStrategy: props.DefaultRetryStrategy, + tracer: tracer, + } +} + +func (hc *httpComponent) Close() { + if tsport, ok := hc.cli.Transport.(*http.Transport); ok { + tsport.CloseIdleConnections() + } else { + logDebugf("Could not close idle connections for transport") + } +} + +func (hc *httpComponent) DoHTTPRequest(req *HTTPRequest, cb DoHTTPRequestCallback) (PendingOp, error) { + tracer := hc.tracer.CreateOpTrace("http", req.TraceContext) + defer tracer.Finish() + + retryStrategy := hc.defaultRetryStrategy + if req.RetryStrategy != nil { + retryStrategy = req.RetryStrategy + } + + ctx, cancel := context.WithCancel(context.Background()) + + ireq := &httpRequest{ + Service: req.Service, + Endpoint: req.Endpoint, + Method: req.Method, + Path: req.Path, + Headers: req.Headers, + ContentType: req.ContentType, + Username: req.Username, + Password: req.Password, + Body: req.Body, + IsIdempotent: req.IsIdempotent, + UniqueID: req.UniqueID, + Deadline: req.Deadline, + RetryStrategy: retryStrategy, + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + + go func() { + resp, err := hc.DoInternalHTTPRequest(ireq, false) + if err != nil { + cancel() + cb(nil, wrapHTTPError(ireq, err)) + return + } + + cb(resp, nil) + }() + + return ireq, nil +} + +func (hc *httpComponent) DoInternalHTTPRequest(req *httpRequest, skipConfigCheck bool) (*HTTPResponse, error) { + if req.Service == MemdService { + return nil, errInvalidService + } + + // This creates a context that has a parent with no cancel function. As such WithCancel will not setup any + // extra go routines and we only need to call cancel on (non-timeout) failure. 
+ ctx := req.Context + if ctx == nil { + ctx = context.Background() + } + ctx, ctxCancel := context.WithCancel(ctx) + + // This is easy to do with a bool and a defer than to ensure that we cancel after every error. + doneCh := make(chan struct{}, 1) + querySuccess := false + defer func() { + doneCh <- struct{}{} + if !querySuccess { + ctxCancel() + } + }() + + start := time.Now() + var cancelationIsTimeout uint32 + // Having no deadline is a legitimate case. + if !req.Deadline.IsZero() { + go func() { + select { + case <-time.After(req.Deadline.Sub(start)): + atomic.StoreUint32(&cancelationIsTimeout, 1) + ctxCancel() + case <-doneCh: + } + }() + } + + if !skipConfigCheck { + for { + revID, err := hc.muxer.ConfigRev() + if err != nil { + return nil, err + } + + if revID > -1 { + break + } + + // We've not successfully been setup with a cluster map yet + select { + case <-ctx.Done(): + err := ctx.Err() + if errors.Is(err, context.Canceled) { + isTimeout := atomic.LoadUint32(&cancelationIsTimeout) + if isTimeout == 1 { + if req.IsIdempotent { + return nil, errUnambiguousTimeout + } + return nil, errAmbiguousTimeout + } + + return nil, errRequestCanceled + } + + return nil, err + case <-time.After(500 * time.Microsecond): + } + } + } + + // Identify an endpoint to use for the request + endpoint := req.Endpoint + if endpoint == "" { + var err error + switch req.Service { + case MgmtService: + endpoint, err = hc.getMgmtEp() + case CapiService: + endpoint, err = hc.getCapiEp() + case N1qlService: + endpoint, err = hc.getN1qlEp() + case FtsService: + endpoint, err = hc.getFtsEp() + case CbasService: + endpoint, err = hc.getCbasEp() + } + if err != nil { + return nil, err + } + } + + // Generate a request URI + reqURI := endpoint + req.Path + + // Create a new request + hreq, err := http.NewRequest(req.Method, reqURI, nil) + if err != nil { + return nil, err + } + + // Lets add our context to the httpRequest + hreq = hreq.WithContext(ctx) + + body := req.Body + + // Inject 
credentials into the request + if req.Username != "" || req.Password != "" { + hreq.SetBasicAuth(req.Username, req.Password) + } else { + creds, err := hc.auth.Credentials(AuthCredsRequest{ + Service: req.Service, + Endpoint: endpoint, + }) + if err != nil { + return nil, err + } + + if req.Service == N1qlService || req.Service == CbasService || + req.Service == FtsService { + // Handle service which support multi-bucket authentication using + // injection into the body of the request. + if len(creds) == 1 { + hreq.SetBasicAuth(creds[0].Username, creds[0].Password) + } else { + body = injectJSONCreds(body, creds) + } + } else { + if len(creds) != 1 { + return nil, errInvalidCredentials + } + + hreq.SetBasicAuth(creds[0].Username, creds[0].Password) + } + } + + hreq.Body = ioutil.NopCloser(bytes.NewReader(body)) + + if req.ContentType != "" { + hreq.Header.Set("Content-Type", req.ContentType) + } else { + hreq.Header.Set("Content-Type", "application/json") + } + for key, val := range req.Headers { + hreq.Header.Set(key, val) + } + + var uniqueID string + if req.UniqueID != "" { + uniqueID = req.UniqueID + } else { + uniqueID = uuid.New().String() + } + hreq.Header.Set("User-Agent", clientInfoString(uniqueID, hc.userAgent)) + + for { + dSpan := hc.tracer.StartHTTPSpan(req, "dispatch_to_server") + logSchedf("Writing HTTP request to %s ID=%s", reqURI, req.UniqueID) + // we can't close the body of this response as it's long lived beyond the function + hresp, err := hc.cli.Do(hreq) // nolint: bodyclose + dSpan.Finish() + if err != nil { + logSchedf("Received HTTP Response for ID=%s, errored", req.UniqueID) + // Because we don't use the http request context itself to perform timeouts we need to do some translation + // of the error message here for better UX. 
+ if errors.Is(err, context.Canceled) { + isTimeout := atomic.LoadUint32(&cancelationIsTimeout) + if isTimeout == 1 { + if req.IsIdempotent { + err = &TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "http", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: req.retryReasons, + RetryAttempts: req.retryCount, + LastDispatchedTo: endpoint, + } + } else { + err = &TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "http", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: req.retryReasons, + RetryAttempts: req.retryCount, + LastDispatchedTo: endpoint, + } + } + } else { + err = errRequestCanceled + } + } + + if !req.IsIdempotent { + return nil, err + } + + isUserError := false + isUserError = isUserError || errors.Is(err, context.DeadlineExceeded) + isUserError = isUserError || errors.Is(err, context.Canceled) + isUserError = isUserError || errors.Is(err, ErrRequestCanceled) + isUserError = isUserError || errors.Is(err, ErrTimeout) + if isUserError { + return nil, err + } + + var retryReason RetryReason + if errors.Is(err, io.ErrUnexpectedEOF) { + retryReason = SocketCloseInFlightRetryReason + } + + if retryReason == nil { + return nil, err + } + + shouldRetry, retryTime := retryOrchMaybeRetry(req, retryReason) + if !shouldRetry { + return nil, err + } + + select { + case <-time.After(time.Until(retryTime)): + // continue! 
+ case <-time.After(time.Until(req.Deadline)): + if errors.Is(err, context.DeadlineExceeded) { + err = &TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "http", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: req.retryReasons, + RetryAttempts: req.retryCount, + LastDispatchedTo: endpoint, + } + } + + return nil, err + } + + continue + } + logSchedf("Received HTTP Response for ID=%s, status=%d", req.UniqueID, hresp.StatusCode) + + respOut := HTTPResponse{ + Endpoint: endpoint, + StatusCode: hresp.StatusCode, + Body: hresp.Body, + } + + querySuccess = true + + return &respOut, nil + } +} + +func (hc *httpComponent) getMgmtEp() (string, error) { + mgmtEps := hc.muxer.MgmtEps() + if len(mgmtEps) == 0 { + return "", errServiceNotAvailable + } + return mgmtEps[rand.Intn(len(mgmtEps))], nil +} + +func (hc *httpComponent) getCapiEp() (string, error) { + capiEps := hc.muxer.CapiEps() + if len(capiEps) == 0 { + return "", errServiceNotAvailable + } + return capiEps[rand.Intn(len(capiEps))], nil +} + +func (hc *httpComponent) getN1qlEp() (string, error) { + n1qlEps := hc.muxer.N1qlEps() + if len(n1qlEps) == 0 { + return "", errServiceNotAvailable + } + return n1qlEps[rand.Intn(len(n1qlEps))], nil +} + +func (hc *httpComponent) getFtsEp() (string, error) { + ftsEps := hc.muxer.FtsEps() + if len(ftsEps) == 0 { + return "", errServiceNotAvailable + } + return ftsEps[rand.Intn(len(ftsEps))], nil +} + +func (hc *httpComponent) getCbasEp() (string, error) { + cbasEps := hc.muxer.CbasEps() + if len(cbasEps) == 0 { + return "", errServiceNotAvailable + } + return cbasEps[rand.Intn(len(cbasEps))], nil +} + +func injectJSONCreds(body []byte, creds []UserPassPair) []byte { + var props map[string]json.RawMessage + err := json.Unmarshal(body, &props) + if err == nil { + if _, ok := props["creds"]; ok { + // Early out if the user has already passed a set of credentials. 
+ return body + } + + jsonCreds, err := json.Marshal(creds) + if err == nil { + props["creds"] = json.RawMessage(jsonCreds) + + newBody, err := json.Marshal(props) + if err == nil { + return newBody + } + } + } + + return body +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/httpmux.go b/vendor/github.com/couchbase/gocbcore/v9/httpmux.go new file mode 100644 index 000000000000..44c93d78ea73 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/httpmux.go @@ -0,0 +1,118 @@ +package gocbcore + +import ( + "sync/atomic" + "unsafe" +) + +type httpMux struct { + muxPtr unsafe.Pointer + breakerCfg CircuitBreakerConfig + cfgMgr configManager +} + +func newHTTPMux(breakerCfg CircuitBreakerConfig, cfgMgr configManager) *httpMux { + mux := &httpMux{ + breakerCfg: breakerCfg, + cfgMgr: cfgMgr, + } + + cfgMgr.AddConfigWatcher(mux) + + return mux +} + +func (mux *httpMux) Get() *httpClientMux { + return (*httpClientMux)(atomic.LoadPointer(&mux.muxPtr)) +} + +func (mux *httpMux) Update(old, new *httpClientMux) bool { + if new == nil { + logErrorf("Attempted to update to nil httpClientMux") + return false + } + + if old != nil { + return atomic.CompareAndSwapPointer(&mux.muxPtr, unsafe.Pointer(old), unsafe.Pointer(new)) + } + + if atomic.SwapPointer(&mux.muxPtr, unsafe.Pointer(new)) != nil { + logErrorf("Updated from nil attempted on initialized httpClientMux") + return false + } + + return true +} + +func (mux *httpMux) Clear() *httpClientMux { + val := atomic.SwapPointer(&mux.muxPtr, nil) + return (*httpClientMux)(val) +} + +func (mux *httpMux) OnNewRouteConfig(cfg *routeConfig) { + oldHTTPMux := mux.Get() + + newHTTPMux := newHTTPClientMux(cfg, mux.breakerCfg) + + mux.Update(oldHTTPMux, newHTTPMux) +} + +func (mux *httpMux) CapiEps() []string { + clientMux := mux.Get() + if clientMux == nil { + return nil + } + + return clientMux.capiEpList +} + +func (mux *httpMux) MgmtEps() []string { + clientMux := mux.Get() + if clientMux == nil { + return nil + } + + return 
clientMux.mgmtEpList +} + +func (mux *httpMux) N1qlEps() []string { + clientMux := mux.Get() + if clientMux == nil { + return nil + } + + return clientMux.n1qlEpList +} + +func (mux *httpMux) CbasEps() []string { + clientMux := mux.Get() + if clientMux == nil { + return nil + } + + return clientMux.cbasEpList +} + +func (mux *httpMux) FtsEps() []string { + clientMux := mux.Get() + if clientMux == nil { + return nil + } + + return clientMux.ftsEpList +} + +func (mux *httpMux) ConfigRev() (int64, error) { + clientMux := mux.Get() + if clientMux == nil { + return 0, errShutdown + } + + return clientMux.revID, nil +} + +func (mux *httpMux) Close() error { + mux.cfgMgr.RemoveConfigWatcher(mux) + mux.Clear() + return nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/ketama.go b/vendor/github.com/couchbase/gocbcore/v9/ketama.go new file mode 100644 index 000000000000..71d5d7a9d5b3 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/ketama.go @@ -0,0 +1,117 @@ +package gocbcore + +import ( + "crypto/md5" // nolint: gosec + "fmt" + "sort" +) + +// "Point" in the ring hash entry. See lcbvb_CONTINUUM +type routeKetamaContinuum struct { + index uint32 + point uint32 +} + +type ketamaSorter struct { + elems []routeKetamaContinuum +} + +func (c ketamaSorter) Len() int { return len(c.elems) } +func (c ketamaSorter) Swap(i, j int) { c.elems[i], c.elems[j] = c.elems[j], c.elems[i] } +func (c ketamaSorter) Less(i, j int) bool { return c.elems[i].point < c.elems[j].point } + +type ketamaContinuum struct { + entries []routeKetamaContinuum +} + +func ketamaHash(key []byte) uint32 { + digest := md5.Sum(key) // nolint: gosec + + return ((uint32(digest[3])&0xFF)<<24 | + (uint32(digest[2])&0xFF)<<16 | + (uint32(digest[1])&0xFF)<<8 | + (uint32(digest[0]) & 0xFF)) & 0xffffffff +} + +func newKetamaContinuum(serverList []string) *ketamaContinuum { + continuum := ketamaContinuum{} + + // Libcouchbase presorts this. Might not strictly be required.. 
+ sort.Strings(serverList) + + for ss, authority := range serverList { + // 160 points per server + for hh := 0; hh < 40; hh++ { + hostkey := []byte(fmt.Sprintf("%s-%d", authority, hh)) + digest := md5.Sum(hostkey) // nolint: gosec + + for nn := 0; nn < 4; nn++ { + + var d1 = uint32(digest[3+nn*4]&0xff) << 24 + var d2 = uint32(digest[2+nn*4]&0xff) << 16 + var d3 = uint32(digest[1+nn*4]&0xff) << 8 + var d4 = uint32(digest[0+nn*4] & 0xff) + var point = d1 | d2 | d3 | d4 + + continuum.entries = append(continuum.entries, routeKetamaContinuum{ + point: point, + index: uint32(ss), + }) + } + } + } + + sort.Sort(ketamaSorter{continuum.entries}) + + return &continuum +} + +func (continuum ketamaContinuum) IsValid() bool { + return len(continuum.entries) > 0 +} + +func (continuum ketamaContinuum) nodeByHash(hash uint32) (int, error) { + var lowp = uint32(0) + var highp = uint32(len(continuum.entries)) + var maxp = highp + + if len(continuum.entries) <= 0 { + logErrorf("0-length ketama map! Mapping to node 0.") + return 0, errCliInternalError + } + + // Copied from libcouchbase vbucket.c (map_ketama) + for { + midp := lowp + (highp-lowp)/2 + if midp == maxp { + // Roll over to first entry + return int(continuum.entries[0].index), nil + } + + mid := continuum.entries[midp].point + var prev uint32 + if midp == 0 { + prev = 0 + } else { + prev = continuum.entries[midp-1].point + } + + if hash <= mid && hash > prev { + return int(continuum.entries[midp].index), nil + } + + if mid < hash { + lowp = midp + 1 + } else { + highp = midp - 1 + } + + if lowp > highp { + return int(continuum.entries[0].index), nil + } + } +} + +func (continuum ketamaContinuum) NodeByKey(key []byte) (int, error) { + return continuum.nodeByHash(ketamaHash(key)) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/kvmux.go b/vendor/github.com/couchbase/gocbcore/v9/kvmux.go new file mode 100644 index 000000000000..8d05cda214a6 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/kvmux.go @@ -0,0 
+1,676 @@ +package gocbcore + +import ( + "container/list" + "errors" + "io" + "sort" + "sync/atomic" + "time" + "unsafe" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type kvFeatureVerifier interface { + HasDurabilityLevelStatus(status durabilityLevelStatus) bool + HasCreateAsDeletedStatus(status createAsDeletedStatus) bool +} + +type dispatcher interface { + DispatchDirect(req *memdQRequest) (PendingOp, error) + RequeueDirect(req *memdQRequest, isRetry bool) + DispatchDirectToAddress(req *memdQRequest, pipeline *memdPipeline) (PendingOp, error) + CollectionsEnabled() bool + SupportsCollections() bool + SetPostCompleteErrorHandler(handler postCompleteErrorHandler) +} + +type kvMux struct { + muxPtr unsafe.Pointer + + collectionsEnabled bool + queueSize int + poolSize int + cfgMgr *configManagementComponent + errMapMgr *errMapComponent + + tracer *tracerComponent + dialer *memdClientDialerComponent + + postCompleteErrHandler postCompleteErrorHandler +} + +type kvMuxProps struct { + CollectionsEnabled bool + QueueSize int + PoolSize int +} + +func newKVMux(props kvMuxProps, cfgMgr *configManagementComponent, errMapMgr *errMapComponent, tracer *tracerComponent, + dialer *memdClientDialerComponent) *kvMux { + mux := &kvMux{ + queueSize: props.QueueSize, + poolSize: props.PoolSize, + collectionsEnabled: props.CollectionsEnabled, + cfgMgr: cfgMgr, + errMapMgr: errMapMgr, + tracer: tracer, + dialer: dialer, + } + + cfgMgr.AddConfigWatcher(mux) + + return mux +} + +func (mux *kvMux) getState() *kvMuxState { + muxPtr := atomic.LoadPointer(&mux.muxPtr) + if muxPtr == nil { + return nil + } + + return (*kvMuxState)(muxPtr) +} + +func (mux *kvMux) updateState(old, new *kvMuxState) bool { + if new == nil { + logErrorf("Attempted to update to nil kvMuxState") + return false + } + + if old != nil { + return atomic.CompareAndSwapPointer(&mux.muxPtr, unsafe.Pointer(old), unsafe.Pointer(new)) + } + + if atomic.SwapPointer(&mux.muxPtr, unsafe.Pointer(new)) != nil { + 
logErrorf("Updated from nil attempted on initialized kvMuxState") + return false + } + + return true +} + +func (mux *kvMux) clear() *kvMuxState { + val := atomic.SwapPointer(&mux.muxPtr, nil) + return (*kvMuxState)(val) +} + +// This method MUST NEVER BLOCK due to its use from various contention points. +func (mux *kvMux) OnNewRouteConfig(cfg *routeConfig) { + oldMuxState := mux.getState() + newMuxState := mux.newKVMuxState(cfg) + + // Attempt to atomically update the routing data + if !mux.updateState(oldMuxState, newMuxState) { + logErrorf("Someone preempted the config update, skipping update") + return + } + + if oldMuxState == nil { + if newMuxState.revID > -1 && mux.collectionsEnabled && !newMuxState.collectionsSupported { + logDebugf("Collections disabled as unsupported") + } + // There is no existing muxer. We can simply start the new pipelines. + for _, pipeline := range newMuxState.pipelines { + pipeline.StartClients() + } + } else { + if !mux.collectionsEnabled { + // If collections just aren't enabled then we never need to refresh the connections because collections + // have come online. + mux.pipelineTakeover(oldMuxState, newMuxState) + } else if oldMuxState.collectionsSupported == newMuxState.collectionsSupported { + // Get the new muxer to takeover the pipelines from the older one + mux.pipelineTakeover(oldMuxState, newMuxState) + } else { + // Collections support has changed so we need to reconnect all connections in order to support the new + // state. 
+ mux.reconnectPipelines(oldMuxState, newMuxState) + } + + mux.requeueRequests(oldMuxState) + } +} + +func (mux *kvMux) SetPostCompleteErrorHandler(handler postCompleteErrorHandler) { + mux.postCompleteErrHandler = handler +} + +func (mux *kvMux) ConfigRev() (int64, error) { + clientMux := mux.getState() + if clientMux == nil { + return 0, errShutdown + } + return clientMux.revID, nil +} + +func (mux *kvMux) ConfigUUID() string { + clientMux := mux.getState() + if clientMux == nil { + return "" + } + return clientMux.uuid +} + +func (mux *kvMux) KeyToVbucket(key []byte) (uint16, error) { + clientMux := mux.getState() + if clientMux == nil || clientMux.vbMap == nil { + return 0, errShutdown + } + + return clientMux.vbMap.VbucketByKey(key), nil +} + +func (mux *kvMux) NumReplicas() int { + clientMux := mux.getState() + if clientMux == nil { + return 0 + } + + if clientMux.vbMap == nil { + return 0 + } + + return clientMux.vbMap.NumReplicas() +} + +func (mux *kvMux) BucketType() bucketType { + clientMux := mux.getState() + if clientMux == nil { + return bktTypeInvalid + } + + return clientMux.bktType +} + +func (mux *kvMux) SupportsGCCCP() bool { + clientMux := mux.getState() + if clientMux == nil { + return false + } + + return clientMux.BucketType() == bktTypeNone +} + +func (mux *kvMux) NumPipelines() int { + clientMux := mux.getState() + if clientMux == nil { + return 0 + } + + return clientMux.NumPipelines() +} + +// CollectionsEnaled returns whether or not the kv mux was created with collections enabled. +func (mux *kvMux) CollectionsEnabled() bool { + return mux.collectionsEnabled +} + +// SupportsCollections returns whether or not collections are enabled AND supported by the server. 
+func (mux *kvMux) SupportsCollections() bool { + if !mux.collectionsEnabled { + return false + } + + clientMux := mux.getState() + if clientMux == nil { + return false + } + + return clientMux.collectionsSupported +} + +func (mux *kvMux) HasDurabilityLevelStatus(status durabilityLevelStatus) bool { + clientMux := mux.getState() + if clientMux == nil { + return false + } + + return clientMux.durabilityLevelStatus == status +} + +func (mux *kvMux) HasCreateAsDeletedStatus(status createAsDeletedStatus) bool { + clientMux := mux.getState() + if clientMux == nil { + return false + } + + return clientMux.createAsDeletedStatus == status +} + +func (mux *kvMux) RouteRequest(req *memdQRequest) (*memdPipeline, error) { + clientMux := mux.getState() + if clientMux == nil { + return nil, errShutdown + } + + // We haven't seen a valid config yet so put this in the dead pipeline so + // it'll get requeued once we do get a config. + if clientMux.revID == -1 { + return clientMux.deadPipe, nil + } + + var srvIdx int + repIdx := req.ReplicaIdx + + // Route to specific server + if repIdx < 0 { + srvIdx = -repIdx - 1 + } else { + var err error + + if clientMux.bktType == bktTypeCouchbase { + if req.Key != nil { + req.Vbucket = clientMux.vbMap.VbucketByKey(req.Key) + } + + srvIdx, err = clientMux.vbMap.NodeByVbucket(req.Vbucket, uint32(repIdx)) + + if err != nil { + return nil, err + } + } else if clientMux.bktType == bktTypeMemcached { + if repIdx > 0 { + // Error. Memcached buckets don't understand replicas! 
+ return nil, errInvalidReplica + } + + if len(req.Key) == 0 { + // Non-broadcast keyless Memcached bucket request + return nil, errInvalidArgument + } + + srvIdx, err = clientMux.ketamaMap.NodeByKey(req.Key) + if err != nil { + return nil, err + } + } else if clientMux.bktType == bktTypeNone { + // This means that we're using GCCCP and not connected to a bucket + return nil, errGCCCPInUse + } + } + + return clientMux.GetPipeline(srvIdx), nil +} + +func (mux *kvMux) DispatchDirect(req *memdQRequest) (PendingOp, error) { + mux.tracer.StartCmdTrace(req) + req.dispatchTime = time.Now() + + for { + pipeline, err := mux.RouteRequest(req) + if err != nil { + return nil, err + } + + err = pipeline.SendRequest(req) + if err == errPipelineClosed { + continue + } else if err != nil { + if err == errPipelineFull { + err = errOverload + } + + shortCircuit, routeErr := mux.handleOpRoutingResp(nil, req, err) + if shortCircuit { + return req, nil + } + + return nil, routeErr + } + + break + } + + return req, nil +} + +func (mux *kvMux) RequeueDirect(req *memdQRequest, isRetry bool) { + mux.tracer.StartCmdTrace(req) + + handleError := func(err error) { + // We only want to log an error on retries if the error isn't cancelled. 
+ if !isRetry || (isRetry && !errors.Is(err, ErrRequestCanceled)) { + logErrorf("Reschedule failed, failing request (%s)", err) + } + + req.tryCallback(nil, err) + } + + logDebugf("Request being requeued, Opaque=%d", req.Opaque) + + for { + pipeline, err := mux.RouteRequest(req) + if err != nil { + handleError(err) + return + } + + err = pipeline.RequeueRequest(req) + if err == errPipelineClosed { + continue + } else if err != nil { + handleError(err) + return + } + + break + } +} + +func (mux *kvMux) DispatchDirectToAddress(req *memdQRequest, pipeline *memdPipeline) (PendingOp, error) { + mux.tracer.StartCmdTrace(req) + req.dispatchTime = time.Now() + + // We set the ReplicaIdx to a negative number to ensure it is not redispatched + // and we check that it was 0 to begin with to ensure it wasn't miss-used. + if req.ReplicaIdx != 0 { + return nil, errInvalidReplica + } + req.ReplicaIdx = -999999999 + + for { + err := pipeline.SendRequest(req) + if err == errPipelineClosed { + continue + } else if err != nil { + if err == errPipelineFull { + err = errOverload + } + + shortCircuit, routeErr := mux.handleOpRoutingResp(nil, req, err) + if shortCircuit { + return req, nil + } + + return nil, routeErr + } + + break + } + + return req, nil +} + +func (mux *kvMux) Close() error { + mux.cfgMgr.RemoveConfigWatcher(mux) + clientMux := mux.clear() + + if clientMux == nil { + return errShutdown + } + + var muxErr error + // Shut down the client multiplexer which will close all its queues + // effectively causing all the clients to shut down. 
+ for _, pipeline := range clientMux.pipelines { + err := pipeline.Close() + if err != nil { + logErrorf("failed to shut down pipeline: %s", err) + muxErr = errCliInternalError + } + } + + if clientMux.deadPipe != nil { + err := clientMux.deadPipe.Close() + if err != nil { + logErrorf("failed to shut down deadpipe: %s", err) + muxErr = errCliInternalError + } + } + + // Drain all the pipelines and error their requests, then + // drain the dead queue and error those requests. + cb := func(req *memdQRequest) { + req.tryCallback(nil, errShutdown) + } + + mux.drainPipelines(clientMux, cb) + + return muxErr +} + +func (mux *kvMux) handleOpRoutingResp(resp *memdQResponse, req *memdQRequest, err error) (bool, error) { + // If there is no error, we should return immediately + if err == nil { + return false, nil + } + + // If this operation has been cancelled, we just fail immediately. + if errors.Is(err, ErrRequestCanceled) || errors.Is(err, ErrTimeout) { + return false, err + } + + err = translateMemdError(err, req) + + // Handle potentially retrying the operation + if errors.Is(err, ErrNotMyVBucket) { + if mux.handleNotMyVbucket(resp, req) { + return true, nil + } + } else if errors.Is(err, ErrDocumentLocked) { + if mux.waitAndRetryOperation(req, KVLockedRetryReason) { + return true, nil + } + } else if errors.Is(err, ErrTemporaryFailure) { + if mux.waitAndRetryOperation(req, KVTemporaryFailureRetryReason) { + return true, nil + } + } else if errors.Is(err, ErrDurableWriteInProgress) { + if mux.waitAndRetryOperation(req, KVSyncWriteInProgressRetryReason) { + return true, nil + } + } else if errors.Is(err, ErrDurableWriteReCommitInProgress) { + if mux.waitAndRetryOperation(req, KVSyncWriteRecommitInProgressRetryReason) { + return true, nil + } + } else if errors.Is(err, io.EOF) { + if mux.waitAndRetryOperation(req, SocketNotAvailableRetryReason) { + return true, nil + } + } else if errors.Is(err, io.ErrShortWrite) { + // This is a special case where the write has failed 
on the underlying connection and not all of the bytes + // were written to the network. + if mux.waitAndRetryOperation(req, MemdWriteFailure) { + return true, nil + } + + } + + if resp != nil && resp.Magic == memd.CmdMagicRes { + shouldRetry := mux.errMapMgr.ShouldRetry(resp.Status) + if shouldRetry { + if mux.waitAndRetryOperation(req, KVErrMapRetryReason) { + return true, nil + } + } + } + + err = mux.errMapMgr.EnhanceKvError(err, resp, req) + + if mux.postCompleteErrHandler == nil { + return false, err + } + + return mux.postCompleteErrHandler(resp, req, err) +} + +func (mux *kvMux) waitAndRetryOperation(req *memdQRequest, reason RetryReason) bool { + shouldRetry, retryTime := retryOrchMaybeRetry(req, reason) + if shouldRetry { + go func() { + time.Sleep(time.Until(retryTime)) + mux.RequeueDirect(req, true) + }() + return true + } + + return false +} + +func (mux *kvMux) handleNotMyVbucket(resp *memdQResponse, req *memdQRequest) bool { + // Grab just the hostname from the source address + sourceHost, err := hostFromHostPort(resp.sourceAddr) + if err != nil { + logErrorf("NMV response source address was invalid, skipping config update") + } else { + // Try to parse the value as a bucket configuration + bk, err := parseConfig(resp.Value, sourceHost) + if err == nil { + // We need to push this upstream which will then update us with a new config. + mux.cfgMgr.OnNewConfig(bk) + } + } + + // Redirect it! 
This may actually come back to this server, but I won't tell + // if you don't ;) + return mux.waitAndRetryOperation(req, KVNotMyVBucketRetryReason) +} + +func (mux *kvMux) drainPipelines(clientMux *kvMuxState, cb func(req *memdQRequest)) { + for _, pipeline := range clientMux.pipelines { + logDebugf("Draining queue %+v", pipeline) + pipeline.Drain(cb) + } + if clientMux.deadPipe != nil { + clientMux.deadPipe.Drain(cb) + } +} + +func (mux *kvMux) newKVMuxState(cfg *routeConfig) *kvMuxState { + poolSize := 1 + if !cfg.IsGCCCPConfig() { + poolSize = mux.poolSize + } + + pipelines := make([]*memdPipeline, len(cfg.kvServerList)) + for i, hostPort := range cfg.kvServerList { + hostPort := hostPort + + getCurClientFn := func() (*memdClient, error) { + return mux.dialer.SlowDialMemdClient(hostPort, mux.handleOpRoutingResp) + } + pipeline := newPipeline(hostPort, poolSize, mux.queueSize, getCurClientFn) + + pipelines[i] = pipeline + } + + return newKVMuxState(cfg, pipelines, newDeadPipeline(mux.queueSize)) +} + +func (mux *kvMux) reconnectPipelines(oldMuxState *kvMuxState, newMuxState *kvMuxState) { + for _, pipeline := range oldMuxState.pipelines { + err := pipeline.Close() + if err != nil { + logErrorf("failed to shut down pipeline: %s", err) + } + } + + err := oldMuxState.deadPipe.Close() + if err != nil { + logErrorf("Failed to properly close abandoned dead pipe (%s)", err) + } + + for _, pipeline := range newMuxState.pipelines { + pipeline.StartClients() + } +} + +func (mux *kvMux) requeueRequests(oldMuxState *kvMuxState) { + // Gather all the requests from all the old pipelines and then + // sort and redispatch them (which will use the new pipelines) + var requestList []*memdQRequest + mux.drainPipelines(oldMuxState, func(req *memdQRequest) { + requestList = append(requestList, req) + }) + + sort.Sort(memdQRequestSorter(requestList)) + + for _, req := range requestList { + stopCmdTrace(req) + mux.RequeueDirect(req, false) + } +} + +func (mux *kvMux) 
pipelineTakeover(oldMux, newMux *kvMuxState) { + oldPipelines := list.New() + + // Gather all our old pipelines up for takeover and what not + if oldMux != nil { + for _, pipeline := range oldMux.pipelines { + oldPipelines.PushBack(pipeline) + } + } + + // Build a function to find an existing pipeline + stealPipeline := func(address string) *memdPipeline { + for e := oldPipelines.Front(); e != nil; e = e.Next() { + pipeline, ok := e.Value.(*memdPipeline) + if !ok { + logErrorf("Failed to cast old pipeline") + continue + } + + if pipeline.Address() == address { + oldPipelines.Remove(e) + return pipeline + } + } + + return nil + } + + // Initialize new pipelines (possibly with a takeover) + for _, pipeline := range newMux.pipelines { + oldPipeline := stealPipeline(pipeline.Address()) + if oldPipeline != nil { + pipeline.Takeover(oldPipeline) + } + + pipeline.StartClients() + } + + // Shut down any pipelines that were not taken over + for e := oldPipelines.Front(); e != nil; e = e.Next() { + pipeline, ok := e.Value.(*memdPipeline) + if !ok { + logErrorf("Failed to cast old pipeline") + continue + } + + err := pipeline.Close() + if err != nil { + logErrorf("Failed to properly close abandoned pipeline (%s)", err) + } + } + + if oldMux != nil && oldMux.deadPipe != nil { + err := oldMux.deadPipe.Close() + if err != nil { + logErrorf("Failed to properly close abandoned dead pipe (%s)", err) + } + } +} + +func (mux *kvMux) PipelineSnapshot() (*pipelineSnapshot, error) { + clientMux := mux.getState() + if clientMux == nil { + return nil, errShutdown + } + + return &pipelineSnapshot{ + state: clientMux, + }, nil +} + +func (mux *kvMux) ConfigSnapshot() (*ConfigSnapshot, error) { + clientMux := mux.getState() + if clientMux == nil { + return nil, errShutdown + } + + return &ConfigSnapshot{ + state: clientMux, + }, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/kvmuxstate.go b/vendor/github.com/couchbase/gocbcore/v9/kvmuxstate.go new file mode 100644 index 
000000000000..78f0a689c1bb --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/kvmuxstate.go @@ -0,0 +1,91 @@ +package gocbcore + +import ( + "fmt" +) + +type kvMuxState struct { + pipelines []*memdPipeline + deadPipe *memdPipeline + + kvServerList []string + bktType bucketType + vbMap *vbucketMap + ketamaMap *ketamaContinuum + uuid string + revID int64 + + durabilityLevelStatus durabilityLevelStatus + createAsDeletedStatus createAsDeletedStatus + collectionsSupported bool +} + +func newKVMuxState(cfg *routeConfig, pipelines []*memdPipeline, deadpipe *memdPipeline) *kvMuxState { + mux := &kvMuxState{ + pipelines: pipelines, + deadPipe: deadpipe, + + kvServerList: cfg.kvServerList, + bktType: cfg.bktType, + vbMap: cfg.vbMap, + ketamaMap: cfg.ketamaMap, + uuid: cfg.uuid, + revID: cfg.revID, + + durabilityLevelStatus: durabilityLevelStatusUnknown, + createAsDeletedStatus: createAsDeletedStatusUnknown, + + collectionsSupported: cfg.ContainsBucketCapability("collections"), + } + + // We setup with a fake config, this means that durability support is still unknown. 
+ if cfg.revID > -1 { + if cfg.ContainsBucketCapability("durableWrite") { + mux.durabilityLevelStatus = durabilityLevelStatusSupported + } else { + mux.durabilityLevelStatus = durabilityLevelStatusUnsupported + } + + if cfg.ContainsBucketCapability("tombstonedUserXAttrs") { + mux.createAsDeletedStatus = createAsDeletedStatusSupported + } else { + mux.createAsDeletedStatus = createAsDeletedStatusUnsupported + } + } + + return mux +} + +func (mux *kvMuxState) BucketType() bucketType { + return mux.bktType +} + +func (mux *kvMuxState) NumPipelines() int { + return len(mux.pipelines) +} + +func (mux *kvMuxState) GetPipeline(index int) *memdPipeline { + if index < 0 || index >= len(mux.pipelines) { + return mux.deadPipe + } + return mux.pipelines[index] +} + +// nolint: unused +func (mux *kvMuxState) debugString() string { + var outStr string + + for i, n := range mux.pipelines { + outStr += fmt.Sprintf("Pipeline %d:\n", i) + outStr += reindentLog(" ", n.debugString()) + "\n" + } + + outStr += "Dead Pipeline:\n" + if mux.deadPipe != nil { + outStr += reindentLog(" ", mux.deadPipe.debugString()) + "\n" + } else { + outStr += " Disabled\n" + } + + return outStr +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/logging.go b/vendor/github.com/couchbase/gocbcore/v9/logging.go new file mode 100644 index 000000000000..d0ddfea1d2ae --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/logging.go @@ -0,0 +1,190 @@ +package gocbcore + +import ( + "fmt" + "log" + "os" + "strings" +) + +// LogLevel specifies the severity of a log message. +type LogLevel int + +// Various logging levels (or subsystems) which can categorize the message. +// Currently these are ordered in decreasing severity. 
+const ( + LogError LogLevel = iota + LogWarn + LogInfo + LogDebug + LogTrace + LogSched + LogMaxVerbosity +) + +func redactUserData(v interface{}) string { + return fmt.Sprintf("%v", v) +} + +func redactMetaData(v interface{}) string { + return fmt.Sprintf("%v", v) +} + +func redactSystemData(v interface{}) string { + return fmt.Sprintf("%v", v) +} + +// LogRedactLevel specifies the degree with which to redact the logs. +type LogRedactLevel int + +const ( + // RedactNone indicates to perform no redactions + RedactNone LogRedactLevel = iota + + // RedactPartial indicates to redact all possible user-identifying information from logs. + RedactPartial + + // RedactFull indicates to fully redact all possible identifying information from logs. + RedactFull +) + +// SetLogRedactionLevel specifies the level with which logs should be redacted. +func SetLogRedactionLevel(level LogRedactLevel) { + globalLogRedactionLevel = level +} + +func isLogRedactionLevelNone() bool { + return globalLogRedactionLevel == RedactNone +} + +func isLogRedactionLevelPartial() bool { + return globalLogRedactionLevel == RedactPartial +} + +func isLogRedactionLevelFull() bool { + return globalLogRedactionLevel == RedactFull +} + +func logLevelToString(level LogLevel) string { + switch level { + case LogError: + return "error" + case LogWarn: + return "warn" + case LogInfo: + return "info" + case LogDebug: + return "debug" + case LogTrace: + return "trace" + case LogSched: + return "sched" + } + + return fmt.Sprintf("unknown (%d)", level) +} + +// Logger defines a logging interface. You can either use one of the default loggers +// (DefaultStdioLogger(), VerboseStdioLogger()) or implement your own. +type Logger interface { + // Outputs logging information: + // level is the verbosity level + // offset is the position within the calling stack from which the message + // originated. This is useful for contextual loggers which retrieve file/line + // information. 
+ Log(level LogLevel, offset int, format string, v ...interface{}) error +} + +type defaultLogger struct { + Level LogLevel + GoLogger *log.Logger +} + +func (l *defaultLogger) Log(level LogLevel, offset int, format string, v ...interface{}) error { + if level > l.Level { + return nil + } + s := fmt.Sprintf(format, v...) + return l.GoLogger.Output(offset+2, s) +} + +var ( + globalDefaultLogger = defaultLogger{ + GoLogger: log.New(os.Stderr, "GOCB ", log.Lmicroseconds|log.Lshortfile), Level: LogDebug, + } + + globalVerboseLogger = defaultLogger{ + GoLogger: globalDefaultLogger.GoLogger, Level: LogMaxVerbosity, + } + + globalLogger Logger + globalLogRedactionLevel LogRedactLevel +) + +// DefaultStdioLogger gets the default standard I/O logger. +// gocbcore.SetLogger(gocbcore.DefaultStdioLogger()) +func DefaultStdioLogger() Logger { + return &globalDefaultLogger +} + +// VerboseStdioLogger is a more verbose level of DefaultStdioLogger(). Messages +// pertaining to the scheduling of ordinary commands (and their responses) will +// also be emitted. +// gocbcore.SetLogger(gocbcore.VerboseStdioLogger()) +func VerboseStdioLogger() Logger { + return &globalVerboseLogger +} + +// SetLogger sets a logger to be used by the library. A logger can be obtained via +// the DefaultStdioLogger() or VerboseStdioLogger() functions. You can also implement +// your own logger using the Logger interface. +func SetLogger(logger Logger) { + globalLogger = logger +} + +type redactableLogValue interface { + redacted() interface{} +} + +func logExf(level LogLevel, offset int, format string, v ...interface{}) { + if globalLogger != nil { + if level <= LogInfo && !isLogRedactionLevelNone() { + // We only redact at info level or below. + for i, iv := range v { + if redactable, ok := iv.(redactableLogValue); ok { + v[i] = redactable.redacted() + } + } + } + + err := globalLogger.Log(level, offset+1, format, v...) 
+ if err != nil { + log.Printf("Logger error occurred (%s)\n", err) + } + } +} + +func logDebugf(format string, v ...interface{}) { + logExf(LogDebug, 1, format, v...) +} + +func logSchedf(format string, v ...interface{}) { + logExf(LogSched, 1, format, v...) +} + +func logWarnf(format string, v ...interface{}) { + logExf(LogWarn, 1, format, v...) +} + +func logErrorf(format string, v ...interface{}) { + logExf(LogError, 1, format, v...) +} + +func logInfof(format string, v ...interface{}) { + logExf(LogInfo, 1, format, v...) +} + +func reindentLog(indent, message string) string { + reindentedMessage := strings.Replace(message, "\n", "\n"+indent, -1) + return fmt.Sprintf("%s%s", indent, reindentedMessage) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/README.md b/vendor/github.com/couchbase/gocbcore/v9/memd/README.md new file mode 100644 index 000000000000..59d84e1447bf --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/README.md @@ -0,0 +1 @@ +This memd library should be moved into gocbcore! 
\ No newline at end of file diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/cidsupporttable.go b/vendor/github.com/couchbase/gocbcore/v9/memd/cidsupporttable.go new file mode 100644 index 000000000000..a83efb267608 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/cidsupporttable.go @@ -0,0 +1,65 @@ +package memd + +var cidSupportedOps = []CmdCode{ + CmdGet, + CmdSet, + CmdAdd, + CmdReplace, + CmdDelete, + CmdIncrement, + CmdDecrement, + CmdAppend, + CmdPrepend, + CmdTouch, + CmdGAT, + CmdGetReplica, + CmdGetLocked, + CmdUnlockKey, + CmdGetMeta, + CmdSetMeta, + CmdDelMeta, + CmdSubDocGet, + CmdSubDocExists, + CmdSubDocDictAdd, + CmdSubDocDictSet, + CmdSubDocDelete, + CmdSubDocReplace, + CmdSubDocArrayPushLast, + CmdSubDocArrayPushFirst, + CmdSubDocArrayInsert, + CmdSubDocArrayAddUnique, + CmdSubDocCounter, + CmdSubDocMultiLookup, + CmdSubDocMultiMutation, + CmdSubDocGetCount, + CmdDcpMutation, + CmdDcpExpiration, + CmdDcpDeletion, +} + +func makeCidSupportedTable() []bool { + var cidTableLen uint32 + for _, cmd := range cidSupportedOps { + if uint32(cmd) >= cidTableLen { + cidTableLen = uint32(cmd) + 1 + } + } + cidTable := make([]bool, cidTableLen) + for _, cmd := range cidSupportedOps { + cidTable[cmd] = true + } + return cidTable +} + +var cidSupportedTable = makeCidSupportedTable() + +// IsCommandCollectionEncoded returns whether a particular command code +// should have its key collection encoded when collections support is +// enabled for a particular connection +func IsCommandCollectionEncoded(cmd CmdCode) bool { + cmdIdx := int(cmd) + if cmdIdx < 0 || cmdIdx >= len(cidSupportedTable) { + return false + } + return cidSupportedTable[cmdIdx] +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/cmdcode.go b/vendor/github.com/couchbase/gocbcore/v9/memd/cmdcode.go new file mode 100644 index 000000000000..0759e2cd3391 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/cmdcode.go @@ -0,0 +1,201 @@ +package memd + 
+import "encoding/hex" + +// CmdCode represents the specific command the packet is performing. +type CmdCode uint8 + +// These constants provide predefined values for all the operations +// which are supported by this library. +const ( + CmdGet = CmdCode(0x00) + CmdSet = CmdCode(0x01) + CmdAdd = CmdCode(0x02) + CmdReplace = CmdCode(0x03) + CmdDelete = CmdCode(0x04) + CmdIncrement = CmdCode(0x05) + CmdDecrement = CmdCode(0x06) + CmdNoop = CmdCode(0x0a) + CmdAppend = CmdCode(0x0e) + CmdPrepend = CmdCode(0x0f) + CmdStat = CmdCode(0x10) + CmdTouch = CmdCode(0x1c) + CmdGAT = CmdCode(0x1d) + CmdHello = CmdCode(0x1f) + CmdSASLListMechs = CmdCode(0x20) + CmdSASLAuth = CmdCode(0x21) + CmdSASLStep = CmdCode(0x22) + CmdGetAllVBSeqnos = CmdCode(0x48) + CmdDcpOpenConnection = CmdCode(0x50) + CmdDcpAddStream = CmdCode(0x51) + CmdDcpCloseStream = CmdCode(0x52) + CmdDcpStreamReq = CmdCode(0x53) + CmdDcpGetFailoverLog = CmdCode(0x54) + CmdDcpStreamEnd = CmdCode(0x55) + CmdDcpSnapshotMarker = CmdCode(0x56) + CmdDcpMutation = CmdCode(0x57) + CmdDcpDeletion = CmdCode(0x58) + CmdDcpExpiration = CmdCode(0x59) + CmdDcpSeqNoAdvanced = CmdCode(0x64) + CmdDcpOsoSnapshot = CmdCode(0x65) + CmdDcpFlush = CmdCode(0x5a) + CmdDcpSetVbucketState = CmdCode(0x5b) + CmdDcpNoop = CmdCode(0x5c) + CmdDcpBufferAck = CmdCode(0x5d) + CmdDcpControl = CmdCode(0x5e) + CmdDcpEvent = CmdCode(0x5f) + CmdGetReplica = CmdCode(0x83) + CmdSelectBucket = CmdCode(0x89) + CmdObserveSeqNo = CmdCode(0x91) + CmdObserve = CmdCode(0x92) + CmdGetLocked = CmdCode(0x94) + CmdUnlockKey = CmdCode(0x95) + CmdGetMeta = CmdCode(0xa0) + CmdSetMeta = CmdCode(0xa2) + CmdDelMeta = CmdCode(0xa8) + CmdGetClusterConfig = CmdCode(0xb5) + CmdGetRandom = CmdCode(0xb6) + CmdCollectionsGetManifest = CmdCode(0xba) + CmdCollectionsGetID = CmdCode(0xbb) + CmdSubDocGet = CmdCode(0xc5) + CmdSubDocExists = CmdCode(0xc6) + CmdSubDocDictAdd = CmdCode(0xc7) + CmdSubDocDictSet = CmdCode(0xc8) + CmdSubDocDelete = CmdCode(0xc9) + CmdSubDocReplace = 
CmdCode(0xca) + CmdSubDocArrayPushLast = CmdCode(0xcb) + CmdSubDocArrayPushFirst = CmdCode(0xcc) + CmdSubDocArrayInsert = CmdCode(0xcd) + CmdSubDocArrayAddUnique = CmdCode(0xce) + CmdSubDocCounter = CmdCode(0xcf) + CmdSubDocMultiLookup = CmdCode(0xd0) + CmdSubDocMultiMutation = CmdCode(0xd1) + CmdSubDocGetCount = CmdCode(0xd2) + CmdGetErrorMap = CmdCode(0xfe) +) + +// Name returns the string representation of the CmdCode. +func (command CmdCode) Name() string { + switch command { + case CmdGet: + return "CMD_GET" + case CmdSet: + return "CMD_SET" + case CmdAdd: + return "CMD_ADD" + case CmdReplace: + return "CMD_REPLACE" + case CmdDelete: + return "CMD_DELETE" + case CmdIncrement: + return "CMD_INCREMENT" + case CmdDecrement: + return "CMD_DECREMENT" + case CmdNoop: + return "CMD_NOOP" + case CmdAppend: + return "CMD_APPEND" + case CmdPrepend: + return "CMD_PREPEND" + case CmdStat: + return "CMD_STAT" + case CmdTouch: + return "CMD_TOUCH" + case CmdGAT: + return "CMD_GAT" + case CmdHello: + return "CMD_HELLO" + case CmdSASLListMechs: + return "CMD_SASLLISTMECHS" + case CmdSASLAuth: + return "CMD_SASLAUTH" + case CmdSASLStep: + return "CMD_SASLSTEP" + case CmdGetAllVBSeqnos: + return "CMD_GETALLVBSEQNOS" + case CmdDcpOpenConnection: + return "CMD_DCPOPENCONNECTION" + case CmdDcpAddStream: + return "CMD_DCPADDSTREAM" + case CmdDcpCloseStream: + return "CMD_DCPCLOSESTREAM" + case CmdDcpStreamReq: + return "CMD_DCPSTREAMREQ" + case CmdDcpGetFailoverLog: + return "CMD_DCPGETFAILOVERLOG" + case CmdDcpStreamEnd: + return "CMD_DCPSTREAMEND" + case CmdDcpSnapshotMarker: + return "CMD_DCPSNAPSHOTMARKER" + case CmdDcpMutation: + return "CMD_DCPMUTATION" + case CmdDcpDeletion: + return "CMD_DCPDELETION" + case CmdDcpExpiration: + return "CMD_DCPEXPIRATION" + case CmdDcpFlush: + return "CMD_DCPFLUSH" + case CmdDcpSetVbucketState: + return "CMD_DCPSETVBUCKETSTATE" + case CmdDcpNoop: + return "CMD_DCPNOOP" + case CmdDcpBufferAck: + return "CMD_DCPBUFFERACK" + case CmdDcpControl: 
+ return "CMD_DCPCONTROL" + case CmdGetReplica: + return "CMD_GETREPLICA" + case CmdSelectBucket: + return "CMD_SELECTBUCKET" + case CmdObserveSeqNo: + return "CMD_OBSERVESEQNO" + case CmdObserve: + return "CMD_OBSERVE" + case CmdGetLocked: + return "CMD_GETLOCKED" + case CmdUnlockKey: + return "CMD_UNLOCKKEY" + case CmdGetMeta: + return "CMD_GETMETA" + case CmdSetMeta: + return "CMD_SETMETA" + case CmdDelMeta: + return "CMD_DELMETA" + case CmdGetClusterConfig: + return "CMD_GETCLUSTERCONFIG" + case CmdGetRandom: + return "CMD_GETRANDOM" + case CmdSubDocGet: + return "CMD_SUBDOCGET" + case CmdSubDocExists: + return "CMD_SUBDOCEXISTS" + case CmdSubDocDictAdd: + return "CMD_SUBDOCDICTADD" + case CmdSubDocDictSet: + return "CMD_SUBDOCDICTSET" + case CmdSubDocDelete: + return "CMD_SUBDOCDELETE" + case CmdSubDocReplace: + return "CMD_SUBDOCREPLACE" + case CmdSubDocArrayPushLast: + return "CMD_SUBDOCARRAYPUSHLAST" + case CmdSubDocArrayPushFirst: + return "CMD_SUBDOCARRAYPUSHFIRST" + case CmdSubDocArrayInsert: + return "CMD_SUBDOCARRAYINSERT" + case CmdSubDocArrayAddUnique: + return "CMD_SUBDOCARRAYADDUNIQUE" + case CmdSubDocCounter: + return "CMD_SUBDOCCOUNTER" + case CmdSubDocMultiLookup: + return "CMD_SUBDOCMULTILOOKUP" + case CmdSubDocMultiMutation: + return "CMD_SUBDOCMULTIMUTATION" + case CmdSubDocGetCount: + return "CMD_SUBDOCGETCOUNT" + case CmdGetErrorMap: + return "CMD_GETERRORMAP" + default: + return "CMD_x" + hex.EncodeToString([]byte{byte(command)}) + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/conn.go b/vendor/github.com/couchbase/gocbcore/v9/memd/conn.go new file mode 100644 index 000000000000..ef73057505af --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/conn.go @@ -0,0 +1,446 @@ +package memd + +import ( + "encoding/binary" + "errors" + "io" + "time" +) + +// Conn represents a memcached protocol connection. 
+type Conn struct { + stream io.ReadWriter + + headerBuf []byte + enabledFeatures map[HelloFeature]bool +} + +// NewConn creates a new connection object which can be used to perform +// reading and writing of packets. +func NewConn(stream io.ReadWriter) *Conn { + return &Conn{ + stream: stream, + headerBuf: make([]byte, 24), + enabledFeatures: make(map[HelloFeature]bool), + } +} + +// EnableFeature enables a particular feature on this connection. +func (c *Conn) EnableFeature(feature HelloFeature) { + c.enabledFeatures[feature] = true +} + +// IsFeatureEnabled indicates whether a particular feature is enabled +// on this particular connection. Note that this is directly based on +// calls to EnableFeature and is not controlled by the library. +func (c *Conn) IsFeatureEnabled(feature HelloFeature) bool { + if enabled, ok := c.enabledFeatures[feature]; ok { + return enabled + } + return false +} + +// WritePacket writes a packet to the network. +func (c *Conn) WritePacket(pkt *Packet) error { + encodedKey := pkt.Key + extras := pkt.Extras + if c.IsFeatureEnabled(FeatureCollections) { + if pkt.Command == CmdObserve { + // While it's possible that the Observe operation is in fact supported with collections + // enabled, we don't currently implement that operation for simplicity, as the key is + // actually hidden away in the value data instead of the usual key data. + return errors.New("the observe operation is not supported with collections enabled") + } + + if IsCommandCollectionEncoded(pkt.Command) { + collEncodedKey := make([]byte, 0, len(encodedKey)+5) + collEncodedKey = AppendULEB128_32(collEncodedKey, pkt.CollectionID) + collEncodedKey = append(collEncodedKey, encodedKey...) + encodedKey = collEncodedKey + } else if pkt.Command == CmdGetRandom { + // GetRandom expects the cid to be in the extras + // GetRandom MUST not have any extras if not using collections so we're ok to just set it. + // It also doesn't expect the collection ID to be leb encoded. 
+ extras = make([]byte, 4) + binary.BigEndian.PutUint32(extras, pkt.CollectionID) + } else { + if pkt.CollectionID > 0 { + return errors.New("cannot encode collection id with a non-collection command") + } + } + } else { + if pkt.CollectionID > 0 { + return errors.New("cannot encode collection id without the feature enabled") + } + } + + extLen := len(extras) + keyLen := len(encodedKey) + valLen := len(pkt.Value) + + framesLen := 0 + if pkt.BarrierFrame != nil { + framesLen++ + } + if pkt.DurabilityLevelFrame != nil { + if pkt.DurabilityTimeoutFrame == nil { + framesLen += 2 + } else { + framesLen += 4 + } + } + if pkt.StreamIDFrame != nil { + framesLen += 3 + } + if pkt.OpenTracingFrame != nil { + traceCtxLen := len(pkt.OpenTracingFrame.TraceContext) + if traceCtxLen < 15 { + framesLen += 1 + traceCtxLen + } else { + framesLen += 2 + traceCtxLen + } + } + if pkt.ServerDurationFrame != nil { + framesLen += 3 + } + + // We automatically upgrade a packet from normal Req or Res magic into + // the frame variant depending on the usage of them. + pktMagic := pkt.Magic + if framesLen > 0 { + if pktMagic == CmdMagicReq { + if !c.IsFeatureEnabled(FeatureAltRequests) { + return errors.New("cannot use frames in req packets without enabling the feature") + } + + pktMagic = cmdMagicReqExt + } else if pktMagic == CmdMagicRes { + pktMagic = cmdMagicResExt + } else { + return errors.New("cannot use frames with an unsupported magic") + } + } + + // Go appears to do some clever things in regards to writing data + // to the kernel for network dispatch. Having a write buffer + // per-server that is re-used actually hinders performance... + // For now, we will simply create a new buffer and let it be GC'd. 
+ buffer := make([]byte, 24+keyLen+extLen+valLen+framesLen) + + buffer[0] = uint8(pktMagic) + buffer[1] = uint8(pkt.Command) + + // This is safe to do without checking the magic as we check the magic + // above before incrementing the framesLen variable + if framesLen > 0 { + buffer[2] = uint8(framesLen) + buffer[3] = uint8(keyLen) + } else { + binary.BigEndian.PutUint16(buffer[2:], uint16(keyLen)) + } + buffer[4] = byte(extLen) + buffer[5] = pkt.Datatype + + if pkt.Magic == CmdMagicReq { + if pkt.Status != 0 { + return errors.New("cannot specify status in a request packet") + } + + binary.BigEndian.PutUint16(buffer[6:], pkt.Vbucket) + } else if pkt.Magic == CmdMagicRes { + if pkt.Vbucket != 0 { + return errors.New("cannot specify vbucket in a response packet") + } + + binary.BigEndian.PutUint16(buffer[6:], uint16(pkt.Status)) + } else { + return errors.New("cannot encode status/vbucket for unknown packet magic") + } + + binary.BigEndian.PutUint32(buffer[8:], uint32(len(buffer)-24)) + binary.BigEndian.PutUint32(buffer[12:], pkt.Opaque) + binary.BigEndian.PutUint64(buffer[16:], pkt.Cas) + + bodyPos := 24 + + // Generate the framing extra data + + // makeFrameHeader will take a FrameType and len and then encode it into a 4:4 bit + // frame header. Note that this does not account for sizing overruns as this is meant + // to be done by the specific commands. 
+ makeFrameHeader := func(ftype frameType, len uint8) uint8 { + ftypeNum := uint8(ftype) + return (ftypeNum << 4) | (len << 0) + } + + if pkt.BarrierFrame != nil { + if pkt.Magic != CmdMagicReq { + return errors.New("cannot use barrier frame in non-request packets") + } + + buffer[bodyPos] = makeFrameHeader(frameTypeReqBarrier, 0) + bodyPos++ + } + if pkt.DurabilityLevelFrame != nil || pkt.DurabilityTimeoutFrame != nil { + if pkt.Magic != CmdMagicReq { + return errors.New("cannot use durability level frame in non-request packets") + } + if !c.IsFeatureEnabled(FeatureSyncReplication) { + return errors.New("cannot use sync replication frames without enabling the feature") + } + if pkt.DurabilityLevelFrame == nil && pkt.DurabilityTimeoutFrame != nil { + return errors.New("cannot encode durability timeout frame without durability level frame") + } + + if pkt.DurabilityTimeoutFrame == nil { + buffer[bodyPos+0] = makeFrameHeader(frameTypeReqSyncDurability, 1) + buffer[bodyPos+1] = uint8(pkt.DurabilityLevelFrame.DurabilityLevel) + bodyPos += 2 + } else { + durabilityTimeoutMillis := pkt.DurabilityTimeoutFrame.DurabilityTimeout / time.Millisecond + if durabilityTimeoutMillis > 65535 { + durabilityTimeoutMillis = 65535 + } + + buffer[bodyPos+0] = makeFrameHeader(frameTypeReqSyncDurability, 3) + buffer[bodyPos+1] = uint8(pkt.DurabilityLevelFrame.DurabilityLevel) + binary.BigEndian.PutUint16(buffer[bodyPos+2:], uint16(durabilityTimeoutMillis)) + bodyPos += 4 + } + } + if pkt.StreamIDFrame != nil { + if pkt.Magic != CmdMagicReq { + return errors.New("cannot use stream id frame in non-request packets") + } + + buffer[bodyPos+0] = makeFrameHeader(frameTypeReqStreamID, 2) + binary.BigEndian.PutUint16(buffer[bodyPos+1:], pkt.StreamIDFrame.StreamID) + bodyPos += 3 + } + if pkt.OpenTracingFrame != nil { + if pkt.Magic != CmdMagicReq { + return errors.New("cannot use open tracing frame in non-request packets") + } + if !c.IsFeatureEnabled(FeatureOpenTracing) { + return 
errors.New("cannot use open tracing frames without enabling the feature") + } + + traceCtxLen := len(pkt.OpenTracingFrame.TraceContext) + if traceCtxLen < 15 { + buffer[bodyPos+0] = makeFrameHeader(frameTypeReqOpenTracing, uint8(traceCtxLen)) + copy(buffer[bodyPos+1:], pkt.OpenTracingFrame.TraceContext) + bodyPos += 1 + traceCtxLen + } else { + buffer[bodyPos+0] = makeFrameHeader(frameTypeReqOpenTracing, 15) + buffer[bodyPos+1] = uint8(traceCtxLen - 15) + copy(buffer[bodyPos+2:], pkt.OpenTracingFrame.TraceContext) + bodyPos += 2 + traceCtxLen + } + } + + if pkt.ServerDurationFrame != nil { + if pkt.Magic != CmdMagicRes { + return errors.New("cannot use server duration frame in non-response packets") + } + if !c.IsFeatureEnabled(FeatureDurations) { + return errors.New("cannot use server duration frames without enabling the feature") + } + + serverDurationEnc := EncodeSrvDura16(pkt.ServerDurationFrame.ServerDuration) + + buffer[bodyPos+0] = makeFrameHeader(frameTypeResSrvDuration, 2) + binary.BigEndian.PutUint16(buffer[bodyPos+1:], serverDurationEnc) + bodyPos += 3 + } + + if len(pkt.UnsupportedFrames) > 0 { + return errors.New("cannot send packets with unsupported frames") + } + + // Copy the extras into the body of the packet + copy(buffer[bodyPos:], extras) + bodyPos += len(extras) + + // Copy the encoded key into the body of the packet + copy(buffer[bodyPos:], encodedKey) + bodyPos += len(encodedKey) + + // Copy the value into the body of the packet + copy(buffer[bodyPos:], pkt.Value) + + bytesWritten, err := c.stream.Write(buffer) + if err != nil { + return err + } + if bytesWritten != len(buffer) { + return io.ErrShortWrite + } + + return nil +} + +// ReadPacket reads a packet from the network. 
+func (c *Conn) ReadPacket() (*Packet, int, error) { + var pkt Packet + + // We use a single byte blob to read all headers to avoid allocating a bunch + // of identical buffers when we only need one + headerBuf := c.headerBuf + + // Read the entire 24-byte header first + _, err := io.ReadFull(c.stream, headerBuf) + if err != nil { + return nil, 0, err + } + + // Grab the length of the full body + bodyLen := binary.BigEndian.Uint32(headerBuf[8:]) + + // Read the remaining bytes of the body + bodyBuf := make([]byte, bodyLen) + _, err = io.ReadFull(c.stream, bodyBuf) + if err != nil { + return nil, 0, err + } + + pktMagic := CmdMagic(headerBuf[0]) + if pktMagic == cmdMagicReqExt { + pkt.Magic = CmdMagicReq + } else if pktMagic == cmdMagicResExt { + pkt.Magic = CmdMagicRes + } else { + pkt.Magic = pktMagic + } + + pkt.Command = CmdCode(headerBuf[1]) + pkt.Datatype = headerBuf[5] + pkt.Opaque = binary.BigEndian.Uint32(headerBuf[12:]) + pkt.Cas = binary.BigEndian.Uint64(headerBuf[16:]) + + if pktMagic == CmdMagicReq || pktMagic == cmdMagicReqExt { + pkt.Vbucket = binary.BigEndian.Uint16(headerBuf[6:]) + } else if pktMagic == CmdMagicRes || pktMagic == cmdMagicResExt { + pkt.Status = StatusCode(binary.BigEndian.Uint16(headerBuf[6:])) + } else { + return nil, 0, errors.New("cannot decode status/vbucket for unknown packet magic") + } + + extLen := int(headerBuf[4]) + keyLen := 0 + framesLen := 0 + if pktMagic == cmdMagicReqExt || pktMagic == cmdMagicResExt { + framesLen = int(headerBuf[2]) + keyLen = int(headerBuf[3]) + } else { + keyLen = int(binary.BigEndian.Uint16(headerBuf[2:])) + } + + bodyPos := 0 + + if framesLen > 0 { + framesBuf := bodyBuf[bodyPos : bodyPos+framesLen] + framePos := 0 + for framePos < framesLen { + frameHeader := framesBuf[framePos] + framePos++ + + frType := frameType((frameHeader & 0xF0) >> 4) + if frType == 15 { + frType = 15 + frameType(framesBuf[framePos]) + framePos++ + } + + frameLen := int((frameHeader & 0x0F) >> 0) + if frameLen == 15 { + 
frameLen = 15 + int(framesBuf[framePos]) + framePos++ + } + + frameBody := framesBuf[framePos : framePos+frameLen] + framePos += frameLen + + if pktMagic == cmdMagicReqExt { + if frType == frameTypeReqBarrier && frameLen == 0 { + pkt.BarrierFrame = &BarrierFrame{} + } else if frType == frameTypeReqSyncDurability && (frameLen == 1 || frameLen == 3) { + pkt.DurabilityLevelFrame = &DurabilityLevelFrame{ + DurabilityLevel: DurabilityLevel(frameBody[0]), + } + if frameLen == 3 { + durabilityTimeoutMillis := binary.BigEndian.Uint16(frameBody[1:]) + pkt.DurabilityTimeoutFrame = &DurabilityTimeoutFrame{ + DurabilityTimeout: time.Duration(durabilityTimeoutMillis) * time.Millisecond, + } + } else { + // We follow the semantic that duplicate frames overwrite previous ones, + // since the timeout frame is 'virtual' to us, we need to clear it in case + // this is a duplicate frame. + pkt.DurabilityTimeoutFrame = nil + } + } else if frType == frameTypeReqStreamID && frameLen == 2 { + pkt.StreamIDFrame = &StreamIDFrame{ + StreamID: binary.BigEndian.Uint16(frameBody), + } + } else if frType == frameTypeReqOpenTracing { + pkt.OpenTracingFrame = &OpenTracingFrame{ + TraceContext: frameBody, + } + } else { + // If we don't understand this frame type, we record it as an + // UnsupportedFrame (as opposed to dropping it blindly) + pkt.UnsupportedFrames = append(pkt.UnsupportedFrames, UnsupportedFrame{ + Type: frType, + Data: frameBody, + }) + } + } else if pktMagic == cmdMagicResExt { + if frType == frameTypeResSrvDuration && frameLen == 2 { + serverDurationEnc := binary.BigEndian.Uint16(frameBody) + pkt.ServerDurationFrame = &ServerDurationFrame{ + ServerDuration: DecodeSrvDura16(serverDurationEnc), + } + } else { + // If we don't understand this frame type, we record it as an + // UnsupportedFrame (as opposed to dropping it blindly) + pkt.UnsupportedFrames = append(pkt.UnsupportedFrames, UnsupportedFrame{ + Type: frType, + Data: frameBody, + }) + } + } else { + return nil, 0, 
errors.New("got unexpected magic when decoding frames") + } + } + + bodyPos += framesLen + } + + pkt.Extras = bodyBuf[bodyPos : bodyPos+extLen] + bodyPos += extLen + + keyVal := bodyBuf[bodyPos : bodyPos+keyLen] + bodyPos += keyLen + if c.IsFeatureEnabled(FeatureCollections) { + if pkt.Command == CmdObserve { + // While it's possible that the Observe operation is in fact supported with collections + // enabled, we don't currently implement that operation for simplicity, as the key is + // actually hidden away in the value data instead of the usual key data. + return nil, 0, errors.New("the observe operation is not supported with collections enabled") + } + + if IsCommandCollectionEncoded(pkt.Command) && keyLen > 0 { + collectionID, idLen, err := DecodeULEB128_32(keyVal) + if err != nil { + return nil, 0, err + } + + keyVal = keyVal[idLen:] + pkt.CollectionID = collectionID + } + } + pkt.Key = keyVal + + pkt.Value = bodyBuf[bodyPos:] + + return &pkt, 24 + int(bodyLen), nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/constants.go b/vendor/github.com/couchbase/gocbcore/v9/memd/constants.go new file mode 100644 index 000000000000..493ce03c06ca --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/constants.go @@ -0,0 +1,393 @@ +package memd + +import "fmt" + +// CmdMagic represents the magic number that begins the header +// of every packet and informs the rest of the header format. +type CmdMagic uint8 + +const ( + // CmdMagicReq indicates that the packet is a request. + CmdMagicReq = CmdMagic(0x80) + + // CmdMagicRes indicates that the packet is a response. + CmdMagicRes = CmdMagic(0x81) + + // These are private rather than public as the library will automatically + // switch to and from these magics based on the use of frames within a packet. + cmdMagicReqExt = CmdMagic(0x08) + cmdMagicResExt = CmdMagic(0x18) +) + +// frameType specifies which kind of frame extra a particular block belongs to. 
+// This is a private type since we automatically encode this internally based on +// whether the specific frame block is attached to the packet. +type frameType uint8 + +const ( + frameTypeReqBarrier = frameType(0) + frameTypeReqSyncDurability = frameType(1) + frameTypeReqStreamID = frameType(2) + frameTypeReqOpenTracing = frameType(3) + frameTypeResSrvDuration = frameType(0) +) + +// HelloFeature represents a feature code included in a memcached +// HELLO operation. +type HelloFeature uint16 + +const ( + // FeatureDatatype indicates support for Datatype fields. + FeatureDatatype = HelloFeature(0x01) + + // FeatureTLS indicates support for TLS + FeatureTLS = HelloFeature(0x02) + + // FeatureTCPNoDelay indicates support for TCP no-delay. + FeatureTCPNoDelay = HelloFeature(0x03) + + // FeatureSeqNo indicates support for mutation tokens. + FeatureSeqNo = HelloFeature(0x04) + + // FeatureTCPDelay indicates support for TCP delay. + FeatureTCPDelay = HelloFeature(0x05) + + // FeatureXattr indicates support for document xattrs. + FeatureXattr = HelloFeature(0x06) + + // FeatureXerror indicates support for extended errors. + FeatureXerror = HelloFeature(0x07) + + // FeatureSelectBucket indicates support for the SelectBucket operation. + FeatureSelectBucket = HelloFeature(0x08) + + // Feature 0x09 is reserved and cannot be used. + + // FeatureSnappy indicates support for snappy compressed documents. + FeatureSnappy = HelloFeature(0x0a) + + // FeatureJSON indicates support for JSON datatype data. + FeatureJSON = HelloFeature(0x0b) + + // FeatureDuplex indicates support for duplex communications. + FeatureDuplex = HelloFeature(0x0c) + + // FeatureClusterMapNotif indicates support for cluster-map update notifications. + FeatureClusterMapNotif = HelloFeature(0x0d) + + // FeatureUnorderedExec indicates support for unordered execution of operations. + FeatureUnorderedExec = HelloFeature(0x0e) + + // FeatureDurations indicates support for server durations. 
+ FeatureDurations = HelloFeature(0xf) + + // FeatureAltRequests indicates support for requests with flexible frame extras. + FeatureAltRequests = HelloFeature(0x10) + + // FeatureSyncReplication indicates support for requests synchronous durability requirements. + FeatureSyncReplication = HelloFeature(0x11) + + // FeatureCollections indicates support for collections. + FeatureCollections = HelloFeature(0x12) + + // FeatureOpenTracing indicates support for OpenTracing. + FeatureOpenTracing = HelloFeature(0x13) + + // FeatureCreateAsDeleted indicates support for the create as deleted feature. + FeatureCreateAsDeleted = HelloFeature(0x17) +) + +// StreamEndStatus represents the reason for a DCP stream ending +type StreamEndStatus uint32 + +const ( + // StreamEndOK represents that the stream ended successfully. + StreamEndOK = StreamEndStatus(0x00) + + // StreamEndClosed represents that the stream was forcefully closed. + StreamEndClosed = StreamEndStatus(0x01) + + // StreamEndStateChanged represents that the stream was closed due to a state change. + StreamEndStateChanged = StreamEndStatus(0x02) + + // StreamEndDisconnected represents that the stream was closed due to disconnection. + StreamEndDisconnected = StreamEndStatus(0x03) + + // StreamEndTooSlow represents that the stream was closed due to the stream being too slow. + StreamEndTooSlow = StreamEndStatus(0x04) + + // StreamEndBackfillFailed represents that the stream was closed due to backfill failing. + StreamEndBackfillFailed = StreamEndStatus(0x05) + + // StreamEndFilterEmpty represents that the stream was closed due to the filter being empty. + StreamEndFilterEmpty = StreamEndStatus(0x07) +) + +// KVText returns the textual representation of this StreamEndStatus. 
+func (code StreamEndStatus) KVText() string { + switch code { + case StreamEndOK: + return "success" + case StreamEndClosed: + return "stream closed" + case StreamEndStateChanged: + return "state changed" + case StreamEndDisconnected: + return "disconnected" + case StreamEndTooSlow: + return "too slow" + case StreamEndFilterEmpty: + return "filter empty" + case StreamEndBackfillFailed: + return "backfill failed" + default: + return fmt.Sprintf("unknown stream close reason (%d)", code) + } +} + +// StreamEventCode is the code for a DCP Stream event +type StreamEventCode uint32 + +const ( + // StreamEventCollectionCreate is the StreamEventCode for a collection create event + StreamEventCollectionCreate = StreamEventCode(0x00) + + // StreamEventCollectionDelete is the StreamEventCode for a collection delete event + StreamEventCollectionDelete = StreamEventCode(0x01) + + // StreamEventCollectionFlush is the StreamEventCode for a collection flush event + StreamEventCollectionFlush = StreamEventCode(0x02) + + // StreamEventScopeCreate is the StreamEventCode for a scope create event + StreamEventScopeCreate = StreamEventCode(0x03) + + // StreamEventScopeDelete is the StreamEventCode for a scope delete event + StreamEventScopeDelete = StreamEventCode(0x04) + + // StreamEventCollectionChanged is the StreamEventCode for a collection changed event + StreamEventCollectionChanged = StreamEventCode(0x05) +) + +// VbucketState represents the state of a particular vbucket on a particular server. +type VbucketState uint32 + +const ( + // VbucketStateActive indicates the vbucket is active on this server + VbucketStateActive = VbucketState(0x01) + + // VbucketStateReplica indicates the vbucket is a replica on this server + VbucketStateReplica = VbucketState(0x02) + + // VbucketStatePending indicates the vbucket is preparing to become active on this server. + VbucketStatePending = VbucketState(0x03) + + // VbucketStateDead indicates the vbucket is no longer valid on this server. 
+ VbucketStateDead = VbucketState(0x04) +) + +// SetMetaOption represents possible option values for a SetMeta operation. +type SetMetaOption uint32 + +const ( + // ForceMetaOp disables conflict resolution for the document and allows the + // operation to be applied to an active, pending, or replica vbucket. + ForceMetaOp = SetMetaOption(0x01) + + // UseLwwConflictResolution switches to Last-Write-Wins conflict resolution + // for the document. + UseLwwConflictResolution = SetMetaOption(0x02) + + // RegenerateCas causes the server to invalidate the current CAS value for + // a document, and to generate a new one. + RegenerateCas = SetMetaOption(0x04) + + // SkipConflictResolution disables conflict resolution for the document. + SkipConflictResolution = SetMetaOption(0x08) + + // IsExpiration indicates that the message is for an expired document. + IsExpiration = SetMetaOption(0x10) +) + +// KeyState represents the various storage states of a key on the server. +type KeyState uint8 + +const ( + // KeyStateNotPersisted indicates the key is in memory, but not yet written to disk. + KeyStateNotPersisted = KeyState(0x00) + + // KeyStatePersisted indicates that the key has been written to disk. + KeyStatePersisted = KeyState(0x01) + + // KeyStateNotFound indicates that the key is not found in memory or on disk. + KeyStateNotFound = KeyState(0x80) + + // KeyStateDeleted indicates that the key has been written to disk as deleted. + KeyStateDeleted = KeyState(0x81) +) + +// SubDocOpType specifies the type of a sub-document operation. +type SubDocOpType uint8 + +const ( + // SubDocOpGet indicates the operation is a sub-document `Get` operation. + SubDocOpGet = SubDocOpType(CmdSubDocGet) + + // SubDocOpExists indicates the operation is a sub-document `Exists` operation. + SubDocOpExists = SubDocOpType(CmdSubDocExists) + + // SubDocOpGetCount indicates the operation is a sub-document `GetCount` operation. 
+ SubDocOpGetCount = SubDocOpType(CmdSubDocGetCount) + + // SubDocOpDictAdd indicates the operation is a sub-document `Add` operation. + SubDocOpDictAdd = SubDocOpType(CmdSubDocDictAdd) + + // SubDocOpDictSet indicates the operation is a sub-document `Set` operation. + SubDocOpDictSet = SubDocOpType(CmdSubDocDictSet) + + // SubDocOpDelete indicates the operation is a sub-document `Remove` operation. + SubDocOpDelete = SubDocOpType(CmdSubDocDelete) + + // SubDocOpReplace indicates the operation is a sub-document `Replace` operation. + SubDocOpReplace = SubDocOpType(CmdSubDocReplace) + + // SubDocOpArrayPushLast indicates the operation is a sub-document `ArrayPushLast` operation. + SubDocOpArrayPushLast = SubDocOpType(CmdSubDocArrayPushLast) + + // SubDocOpArrayPushFirst indicates the operation is a sub-document `ArrayPushFirst` operation. + SubDocOpArrayPushFirst = SubDocOpType(CmdSubDocArrayPushFirst) + + // SubDocOpArrayInsert indicates the operation is a sub-document `ArrayInsert` operation. + SubDocOpArrayInsert = SubDocOpType(CmdSubDocArrayInsert) + + // SubDocOpArrayAddUnique indicates the operation is a sub-document `ArrayAddUnique` operation. + SubDocOpArrayAddUnique = SubDocOpType(CmdSubDocArrayAddUnique) + + // SubDocOpCounter indicates the operation is a sub-document `Counter` operation. + SubDocOpCounter = SubDocOpType(CmdSubDocCounter) + + // SubDocOpGetDoc represents a full document retrieval, for use with extended attribute ops. + SubDocOpGetDoc = SubDocOpType(CmdGet) + + // SubDocOpSetDoc represents a full document set, for use with extended attribute ops. + SubDocOpSetDoc = SubDocOpType(CmdSet) + + // SubDocOpAddDoc represents a full document add, for use with extended attribute ops. + SubDocOpAddDoc = SubDocOpType(CmdAdd) + + // SubDocOpDeleteDoc represents a full document delete, for use with extended attribute ops. 
+ SubDocOpDeleteDoc = SubDocOpType(CmdDelete) +) + +// DcpOpenFlag specifies flags for DCP connections configured when the stream is opened. +type DcpOpenFlag uint32 + +const ( + // DcpOpenFlagProducer indicates this connection wants the other end to be a producer. + DcpOpenFlagProducer = DcpOpenFlag(0x01) + + // DcpOpenFlagNotifier indicates this connection wants the other end to be a notifier. + DcpOpenFlagNotifier = DcpOpenFlag(0x02) + + // DcpOpenFlagIncludeXattrs indicates the client wishes to receive extended attributes. + DcpOpenFlagIncludeXattrs = DcpOpenFlag(0x04) + + // DcpOpenFlagNoValue indicates the client does not wish to receive mutation values. + DcpOpenFlagNoValue = DcpOpenFlag(0x08) + + // DcpOpenFlagIncludeDeleteTimes indicates the client wishes to receive delete times. + DcpOpenFlagIncludeDeleteTimes = DcpOpenFlag(0x20) +) + +// DcpStreamAddFlag specifies flags for DCP streams configured when the stream is opened. +type DcpStreamAddFlag uint32 + +const ( + //DcpStreamAddFlagDiskOnly indicates that stream should only send items if they are on disk + DcpStreamAddFlagDiskOnly = DcpStreamAddFlag(0x02) + + // DcpStreamAddFlagLatest indicates this stream wants to get data up to the latest seqno. + DcpStreamAddFlagLatest = DcpStreamAddFlag(0x04) + + // DcpStreamAddFlagActiveOnly indicates this stream should only connect to an active vbucket. + DcpStreamAddFlagActiveOnly = DcpStreamAddFlag(0x10) + + // DcpStreamAddFlagStrictVBUUID indicates the vbuuid must match unless the start seqno + // is 0 and the vbuuid is also 0. + DcpStreamAddFlagStrictVBUUID = DcpStreamAddFlag(0x20) +) + +// DatatypeFlag specifies data flags for the value of a document. +type DatatypeFlag uint8 + +const ( + // DatatypeFlagJSON indicates the server believes the value payload to be JSON. + DatatypeFlagJSON = DatatypeFlag(0x01) + + // DatatypeFlagCompressed indicates the value payload is compressed. 
+ DatatypeFlagCompressed = DatatypeFlag(0x02) + + // DatatypeFlagXattrs indicates the inclusion of xattr data in the value payload. + DatatypeFlagXattrs = DatatypeFlag(0x04) +) + +// SubdocFlag specifies flags for a sub-document operation. +type SubdocFlag uint8 + +const ( + // SubdocFlagNone indicates no special treatment for this operation. + SubdocFlagNone = SubdocFlag(0x00) + + // SubdocFlagMkDirP indicates that the path should be created if it does not already exist. + SubdocFlagMkDirP = SubdocFlag(0x01) + + // 0x02 is unused, formally SubdocFlagMkDoc + + // SubdocFlagXattrPath indicates that the path refers to an Xattr rather than the document body. + SubdocFlagXattrPath = SubdocFlag(0x04) + + // 0x08 is unused, formally SubdocFlagAccessDeleted + + // SubdocFlagExpandMacros indicates that the value portion of any sub-document mutations + // should be expanded if they contain macros such as ${Mutation.CAS}. + SubdocFlagExpandMacros = SubdocFlag(0x10) +) + +// SubdocDocFlag specifies document-level flags for a sub-document operation. +type SubdocDocFlag uint8 + +const ( + // SubdocDocFlagNone indicates no special treatment for this operation. + SubdocDocFlagNone = SubdocDocFlag(0x00) + + // SubdocDocFlagMkDoc indicates that the document should be created if it does not already exist. + SubdocDocFlagMkDoc = SubdocDocFlag(0x01) + + // SubdocDocFlagAddDoc indices that this operation should be an add rather than set. + SubdocDocFlagAddDoc = SubdocDocFlag(0x02) + + // SubdocDocFlagAccessDeleted indicates that you wish to receive soft-deleted documents. + // Internal: This should never be used and is not supported. + SubdocDocFlagAccessDeleted = SubdocDocFlag(0x04) + + // SubdocDocFlagCreateAsDeleted indicates that the document should be created as deleted. + // That is, to create a tombstone only. + // Internal: This should never be used and is not supported. 
+ SubdocDocFlagCreateAsDeleted = SubdocDocFlag(0x08) +) + +// DurabilityLevel specifies the level to use for enhanced durability requirements. +type DurabilityLevel uint8 + +const ( + // DurabilityLevelMajority specifies that a change must be replicated to (held in memory) + // a majority of the nodes for the bucket. + DurabilityLevelMajority = DurabilityLevel(0x01) + + // DurabilityLevelMajorityAndPersistOnMaster specifies that a change must be replicated to (held in memory) + // a majority of the nodes for the bucket and additionally persisted to disk on the active node. + DurabilityLevelMajorityAndPersistOnMaster = DurabilityLevel(0x02) + + // DurabilityLevelPersistToMajority specifies that a change must be persisted to (written to disk) + // a majority for the bucket. + DurabilityLevelPersistToMajority = DurabilityLevel(0x03) +) diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/packet.go b/vendor/github.com/couchbase/gocbcore/v9/memd/packet.go new file mode 100644 index 000000000000..8f59531872ef --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/packet.go @@ -0,0 +1,73 @@ +package memd + +import "time" + +// BarrierFrame is used to signal to the server that this command should be +// barriered and must not be executed concurrently with other commands. +type BarrierFrame struct { + // Barrier frames have no additional configuration, but their existence + // triggers the barriering behaviour. +} + +// DurabilityLevelFrame allows you to specify a durability level for an +// operation through the frame extras. +type DurabilityLevelFrame struct { + DurabilityLevel DurabilityLevel +} + +// DurabilityTimeoutFrame allows you to specify a specific timeout for +// durability operations to timeout. Note that this frame is actually +// an extension of DurabilityLevelFrame and requires that frame to also +// be used in order to function. 
+type DurabilityTimeoutFrame struct { + DurabilityTimeout time.Duration +} + +// StreamIDFrame provides information about which stream this particular +// operation is related to (used for DCP streams). +type StreamIDFrame struct { + StreamID uint16 +} + +// OpenTracingFrame allows open tracing context information to be included +// along with a command which is being performed. +type OpenTracingFrame struct { + TraceContext []byte +} + +// ServerDurationFrame allows the server to return information about the +// period of time an operation took to complete. +type ServerDurationFrame struct { + ServerDuration time.Duration +} + +// UnsupportedFrame is used to include an unsupported frame type in the +// packet data to enable further processing if needed. +type UnsupportedFrame struct { + Type frameType + Data []byte +} + +// Packet represents a single request or response packet being exchanged +// between two clients. +type Packet struct { + Magic CmdMagic + Command CmdCode + Datatype uint8 + Status StatusCode + Vbucket uint16 + Opaque uint32 + Cas uint64 + CollectionID uint32 + Key []byte + Extras []byte + Value []byte + + BarrierFrame *BarrierFrame + DurabilityLevelFrame *DurabilityLevelFrame + DurabilityTimeoutFrame *DurabilityTimeoutFrame + StreamIDFrame *StreamIDFrame + OpenTracingFrame *OpenTracingFrame + ServerDurationFrame *ServerDurationFrame + UnsupportedFrames []UnsupportedFrame +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/srvdura16.go b/vendor/github.com/couchbase/gocbcore/v9/memd/srvdura16.go new file mode 100644 index 000000000000..262935376b0b --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/srvdura16.go @@ -0,0 +1,24 @@ +package memd + +import ( + "math" + "time" +) + +// EncodeSrvDura16 takes a standard go time duration and encodes it into +// the appropriate format for the server. 
+func EncodeSrvDura16(dura time.Duration) uint16 { + serverDurationUs := dura / time.Microsecond + serverDurationEnc := int(math.Pow(float64(serverDurationUs)*2, 1.0/1.74)) + if serverDurationEnc > 65535 { + serverDurationEnc = 65535 + } + return uint16(serverDurationEnc) +} + +// DecodeSrvDura16 takes an encoded operation duration from the server +// and converts it to a standard Go time duration. +func DecodeSrvDura16(enc uint16) time.Duration { + serverDurationUs := math.Round(math.Pow(float64(enc), 1.74) / 2) + return time.Duration(serverDurationUs) * time.Microsecond +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/statuscode.go b/vendor/github.com/couchbase/gocbcore/v9/memd/statuscode.go new file mode 100644 index 000000000000..0bcb2202cda7 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/statuscode.go @@ -0,0 +1,291 @@ +package memd + +import "fmt" + +// StatusCode represents a memcached response status. +type StatusCode uint16 + +const ( + // StatusSuccess indicates the operation completed successfully. + StatusSuccess = StatusCode(0x00) + + // StatusKeyNotFound occurs when an operation is performed on a key that does not exist. + StatusKeyNotFound = StatusCode(0x01) + + // StatusKeyExists occurs when an operation is performed on a key that could not be found. + StatusKeyExists = StatusCode(0x02) + + // StatusTooBig occurs when an operation attempts to store more data in a single document + // than the server is capable of storing (by default, this is a 20MB limit). + StatusTooBig = StatusCode(0x03) + + // StatusInvalidArgs occurs when the server receives invalid arguments for an operation. + StatusInvalidArgs = StatusCode(0x04) + + // StatusNotStored occurs when the server fails to store a key. + StatusNotStored = StatusCode(0x05) + + // StatusBadDelta occurs when an invalid delta value is specified to a counter operation. 
+ StatusBadDelta = StatusCode(0x06) + + // StatusNotMyVBucket occurs when an operation is dispatched to a server which is + // non-authoritative for a specific vbucket. + StatusNotMyVBucket = StatusCode(0x07) + + // StatusNoBucket occurs when no bucket was selected on a connection. + StatusNoBucket = StatusCode(0x08) + + // StatusLocked occurs when an operation fails due to the document being locked. + StatusLocked = StatusCode(0x09) + + // StatusAuthStale occurs when authentication credentials have become invalidated. + StatusAuthStale = StatusCode(0x1f) + + // StatusAuthError occurs when the authentication information provided was not valid. + StatusAuthError = StatusCode(0x20) + + // StatusAuthContinue occurs in multi-step authentication when more authentication + // work needs to be performed in order to complete the authentication process. + StatusAuthContinue = StatusCode(0x21) + + // StatusRangeError occurs when the range specified to the server is not valid. + StatusRangeError = StatusCode(0x22) + + // StatusRollback occurs when a DCP stream fails to open due to a rollback having + // previously occurred since the last time the stream was opened. + StatusRollback = StatusCode(0x23) + + // StatusAccessError occurs when an access error occurs. + StatusAccessError = StatusCode(0x24) + + // StatusNotInitialized is sent by servers which are still initializing, and are not + // yet ready to accept operations on behalf of a particular bucket. + StatusNotInitialized = StatusCode(0x25) + + // StatusUnknownCommand occurs when an unknown operation is sent to a server. + StatusUnknownCommand = StatusCode(0x81) + + // StatusOutOfMemory occurs when the server cannot service a request due to memory + // limitations. + StatusOutOfMemory = StatusCode(0x82) + + // StatusNotSupported occurs when an operation is understood by the server, but that + // operation is not supported on this server (occurs for a variety of reasons). 
+ StatusNotSupported = StatusCode(0x83) + + // StatusInternalError occurs when internal errors prevent the server from processing + // your request. + StatusInternalError = StatusCode(0x84) + + // StatusBusy occurs when the server is too busy to process your request right away. + // Attempting the operation at a later time will likely succeed. + StatusBusy = StatusCode(0x85) + + // StatusTmpFail occurs when a temporary failure is preventing the server from + // processing your request. + StatusTmpFail = StatusCode(0x86) + + // StatusCollectionUnknown occurs when a Collection cannot be found. + StatusCollectionUnknown = StatusCode(0x88) + + // StatusScopeUnknown occurs when a Scope cannot be found. + StatusScopeUnknown = StatusCode(0x8c) + + // StatusDurabilityInvalidLevel occurs when an invalid durability level was requested. + StatusDurabilityInvalidLevel = StatusCode(0xa0) + + // StatusDurabilityImpossible occurs when a request is performed with impossible + // durability level requirements. + StatusDurabilityImpossible = StatusCode(0xa1) + + // StatusSyncWriteInProgress occurs when an attempt is made to write to a key that has + // a SyncWrite pending. + StatusSyncWriteInProgress = StatusCode(0xa2) + + // StatusSyncWriteAmbiguous occurs when an SyncWrite does not complete in the specified + // time and the result is ambiguous. + StatusSyncWriteAmbiguous = StatusCode(0xa3) + + // StatusSyncWriteReCommitInProgress occurs when an SyncWrite is being recommitted. + StatusSyncWriteReCommitInProgress = StatusCode(0xa4) + + // StatusSubDocPathNotFound occurs when a sub-document operation targets a path + // which does not exist in the specifie document. + StatusSubDocPathNotFound = StatusCode(0xc0) + + // StatusSubDocPathMismatch occurs when a sub-document operation specifies a path + // which does not match the document structure (field access on an array). 
+ StatusSubDocPathMismatch = StatusCode(0xc1) + + // StatusSubDocPathInvalid occurs when a sub-document path could not be parsed. + StatusSubDocPathInvalid = StatusCode(0xc2) + + // StatusSubDocPathTooBig occurs when a sub-document path is too big. + StatusSubDocPathTooBig = StatusCode(0xc3) + + // StatusSubDocDocTooDeep occurs when an operation would cause a document to be + // nested beyond the depth limits allowed by the sub-document specification. + StatusSubDocDocTooDeep = StatusCode(0xc4) + + // StatusSubDocCantInsert occurs when a sub-document operation could not insert. + StatusSubDocCantInsert = StatusCode(0xc5) + + // StatusSubDocNotJSON occurs when a sub-document operation is performed on a + // document which is not JSON. + StatusSubDocNotJSON = StatusCode(0xc6) + + // StatusSubDocBadRange occurs when a sub-document operation is performed with + // a bad range. + StatusSubDocBadRange = StatusCode(0xc7) + + // StatusSubDocBadDelta occurs when a sub-document counter operation is performed + // and the specified delta is not valid. + StatusSubDocBadDelta = StatusCode(0xc8) + + // StatusSubDocPathExists occurs when a sub-document operation expects a path not + // to exists, but the path was found in the document. + StatusSubDocPathExists = StatusCode(0xc9) + + // StatusSubDocValueTooDeep occurs when a sub-document operation specifies a value + // which is deeper than the depth limits of the sub-document specification. + StatusSubDocValueTooDeep = StatusCode(0xca) + + // StatusSubDocBadCombo occurs when a multi-operation sub-document operation is + // performed and operations within the package of ops conflict with each other. + StatusSubDocBadCombo = StatusCode(0xcb) + + // StatusSubDocBadMulti occurs when a multi-operation sub-document operation is + // performed and operations within the package of ops conflict with each other. 
+ StatusSubDocBadMulti = StatusCode(0xcc) + + // StatusSubDocSuccessDeleted occurs when a multi-operation sub-document operation + // is performed on a soft-deleted document. + StatusSubDocSuccessDeleted = StatusCode(0xcd) + + // StatusSubDocXattrInvalidFlagCombo occurs when an invalid set of + // extended-attribute flags is passed to a sub-document operation. + StatusSubDocXattrInvalidFlagCombo = StatusCode(0xce) + + // StatusSubDocXattrInvalidKeyCombo occurs when an invalid set of key operations + // are specified for a extended-attribute sub-document operation. + StatusSubDocXattrInvalidKeyCombo = StatusCode(0xcf) + + // StatusSubDocXattrUnknownMacro occurs when an invalid macro value is specified. + StatusSubDocXattrUnknownMacro = StatusCode(0xd0) + + // StatusSubDocXattrUnknownVAttr occurs when an invalid virtual attribute is specified. + StatusSubDocXattrUnknownVAttr = StatusCode(0xd1) + + // StatusSubDocXattrCannotModifyVAttr occurs when a mutation is attempted upon + // a virtual attribute (which are immutable by definition). + StatusSubDocXattrCannotModifyVAttr = StatusCode(0xd2) + + // StatusSubDocMultiPathFailureDeleted occurs when a Multi Path Failure occurs on + // a soft-deleted document. + StatusSubDocMultiPathFailureDeleted = StatusCode(0xd3) +) + +// KVText returns the textual representation of this StatusCode. 
+func (code StatusCode) KVText() string { + switch code { + case StatusSuccess: + return "success" + case StatusKeyNotFound: + return "key not found" + case StatusKeyExists: + return "key already exists, if a cas was provided the key exists with a different cas" + case StatusTooBig: + return "document value was too large" + case StatusInvalidArgs: + return "invalid arguments" + case StatusNotStored: + return "document could not be stored" + case StatusBadDelta: + return "invalid delta was passed" + case StatusNotMyVBucket: + return "operation sent to incorrect server" + case StatusNoBucket: + return "not connected to a bucket" + case StatusAuthStale: + return "authentication context is stale, try re-authenticating" + case StatusAuthError: + return "authentication error" + case StatusAuthContinue: + return "more authentication steps needed" + case StatusRangeError: + return "requested value is outside range" + case StatusAccessError: + return "no access" + case StatusNotInitialized: + return "cluster is being initialized, requests are blocked" + case StatusRollback: + return "rollback is required" + case StatusUnknownCommand: + return "unknown command was received" + case StatusOutOfMemory: + return "server is out of memory" + case StatusNotSupported: + return "server does not support this command" + case StatusInternalError: + return "internal server error" + case StatusBusy: + return "server is busy, try again later" + case StatusTmpFail: + return "temporary failure occurred, try again later" + case StatusCollectionUnknown: + return "the requested collection cannot be found" + case StatusScopeUnknown: + return "the requested scope cannot be found." + case StatusDurabilityInvalidLevel: + return "invalid request, invalid durability level specified." + case StatusDurabilityImpossible: + return "the requested durability requirements are impossible." + case StatusSyncWriteInProgress: + return "key already has syncwrite pending." 
+ case StatusSyncWriteAmbiguous: + return "the syncwrite request did not complete in time." + case StatusSubDocPathNotFound: + return "sub-document path does not exist" + case StatusSubDocPathMismatch: + return "type of element in sub-document path conflicts with type in document" + case StatusSubDocPathInvalid: + return "malformed sub-document path" + case StatusSubDocPathTooBig: + return "sub-document contains too many components" + case StatusSubDocDocTooDeep: + return "existing document contains too many levels of nesting" + case StatusSubDocCantInsert: + return "subdocument operation would invalidate the JSON" + case StatusSubDocNotJSON: + return "existing document is not valid JSON" + case StatusSubDocBadRange: + return "existing numeric value is too large" + case StatusSubDocBadDelta: + return "numeric operation would yield a number that is too large, or " + + "a zero delta was specified" + case StatusSubDocPathExists: + return "given path already exists in the document" + case StatusSubDocValueTooDeep: + return "value is too deep to insert" + case StatusSubDocBadCombo: + return "incorrectly matched subdocument operation types" + case StatusSubDocBadMulti: + return "could not execute one or more multi lookups or mutations" + case StatusSubDocSuccessDeleted: + return "document is soft-deleted" + case StatusSubDocXattrInvalidFlagCombo: + return "invalid xattr flag combination" + case StatusSubDocXattrInvalidKeyCombo: + return "invalid xattr key combination" + case StatusSubDocXattrUnknownMacro: + return "unknown xattr macro" + case StatusSubDocXattrUnknownVAttr: + return "unknown xattr virtual attribute" + case StatusSubDocXattrCannotModifyVAttr: + return "cannot modify virtual attributes" + case StatusSubDocMultiPathFailureDeleted: + return "sub-document multi-path error" + default: + return fmt.Sprintf("unknown kv status code (%d)", code) + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memd/uleb128.go 
b/vendor/github.com/couchbase/gocbcore/v9/memd/uleb128.go new file mode 100644 index 000000000000..8e2a03d71cad --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memd/uleb128.go @@ -0,0 +1,53 @@ +package memd + +import ( + "errors" +) + +// AppendULEB128_32 appends a 32-bit number encoded as ULEB128 to a byte slice +func AppendULEB128_32(b []byte, v uint32) []byte { + for { + c := uint8(v & 0x7f) + v >>= 7 + if v != 0 { + c |= 0x80 + } + b = append(b, c) + if c&0x80 == 0 { + break + } + } + return b +} + +// DecodeULEB128_32 decodes a ULEB128 encoded number into a uint32 +func DecodeULEB128_32(b []byte) (uint32, int, error) { + if len(b) == 0 { + return 0, 0, errors.New("no data provided") + } + var u uint64 + var n int + for i := 0; ; i++ { + if i >= len(b) { + return 0, 0, errors.New("encoded number is longer than provided data") + } + if i*7 > 32 { + // oversize and then break to get caught below + u = 0xffffffffffffffff + break + } + + u |= uint64(b[i]&0x7f) << (i * 7) + + if b[i]&0x80 == 0 { + n = i + 1 + break + } + } + + if u > 0xffffffff { + return 0, 0, errors.New("encoded data is longer than 32 bits") + } + + return uint32(u), n, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdclient.go b/vendor/github.com/couchbase/gocbcore/v9/memdclient.go new file mode 100644 index 000000000000..ff6600271455 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdclient.go @@ -0,0 +1,1051 @@ +package gocbcore + +import ( + "encoding/binary" + "errors" + "io" + "strings" + "sync" + "sync/atomic" + "time" + "unsafe" + + "github.com/couchbase/gocbcore/v9/memd" + + "github.com/golang/snappy" +) + +func isCompressibleOp(command memd.CmdCode) bool { + switch command { + case memd.CmdSet: + fallthrough + case memd.CmdAdd: + fallthrough + case memd.CmdReplace: + fallthrough + case memd.CmdAppend: + fallthrough + case memd.CmdPrepend: + return true + } + return false +} + +type postCompleteErrorHandler func(resp *memdQResponse, req 
*memdQRequest, err error) (bool, error) + +type memdClient struct { + lastActivity int64 + dcpAckSize int + dcpFlowRecv int + closeNotify chan bool + connID string + closed bool + conn memdConn + opList memdOpMap + features []memd.HelloFeature + lock sync.Mutex + streamEndNotSupported bool + breaker circuitBreaker + postErrHandler postCompleteErrorHandler + tracer *tracerComponent + zombieLogger *zombieLoggerComponent + + compressionMinSize int + compressionMinRatio float64 + disableDecompression bool +} + +type dcpBuffer struct { + resp *memdQResponse + packetLen int + isInternal bool +} + +type memdClientProps struct { + ClientID string + + CompressionMinSize int + CompressionMinRatio float64 + DisableDecompression bool +} + +func newMemdClient(props memdClientProps, conn memdConn, breakerCfg CircuitBreakerConfig, postErrHandler postCompleteErrorHandler, + tracer *tracerComponent, zombieLogger *zombieLoggerComponent) *memdClient { + client := memdClient{ + closeNotify: make(chan bool), + connID: props.ClientID + "/" + formatCbUID(randomCbUID()), + postErrHandler: postErrHandler, + tracer: tracer, + zombieLogger: zombieLogger, + conn: conn, + + compressionMinRatio: props.CompressionMinRatio, + compressionMinSize: props.CompressionMinSize, + disableDecompression: props.DisableDecompression, + } + + if breakerCfg.Enabled { + client.breaker = newLazyCircuitBreaker(breakerCfg, client.sendCanary) + } else { + client.breaker = newNoopCircuitBreaker() + } + + client.run() + return &client +} + +func (client *memdClient) SupportsFeature(feature memd.HelloFeature) bool { + return checkSupportsFeature(client.features, feature) +} + +func (client *memdClient) EnableDcpBufferAck(bufferAckSize int) { + client.dcpAckSize = bufferAckSize +} + +func (client *memdClient) maybeSendDcpBufferAck(packetLen int) { + client.dcpFlowRecv += packetLen + if client.dcpFlowRecv < client.dcpAckSize { + return + } + + ackAmt := client.dcpFlowRecv + + extrasBuf := make([]byte, 4) + 
binary.BigEndian.PutUint32(extrasBuf, uint32(ackAmt)) + + err := client.conn.WritePacket(&memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdDcpBufferAck, + Extras: extrasBuf, + }) + if err != nil { + logWarnf("Failed to dispatch DCP buffer ack: %s", err) + } + + client.dcpFlowRecv -= ackAmt +} + +func (client *memdClient) Address() string { + return client.conn.RemoteAddr() +} + +func (client *memdClient) CloseNotify() chan bool { + return client.closeNotify +} + +func (client *memdClient) takeRequestOwnership(req *memdQRequest) bool { + client.lock.Lock() + defer client.lock.Unlock() + + if client.closed { + logDebugf("Attempted to put dispatched op in drained opmap") + return false + } + + if !atomic.CompareAndSwapPointer(&req.waitingIn, nil, unsafe.Pointer(client)) { + logDebugf("Attempted to put dispatched op in new opmap") + return false + } + + if req.isCancelled() { + atomic.CompareAndSwapPointer(&req.waitingIn, unsafe.Pointer(client), nil) + return false + } + + connInfo := memdQRequestConnInfo{ + lastDispatchedTo: client.Address(), + lastDispatchedFrom: client.conn.LocalAddr(), + lastConnectionID: client.connID, + } + req.SetConnectionInfo(connInfo) + + client.opList.Add(req) + return true +} + +func (client *memdClient) CancelRequest(req *memdQRequest, err error) bool { + client.lock.Lock() + defer client.lock.Unlock() + + if client.closed { + logDebugf("Attempted to remove op from drained opmap") + return false + } + + removed := client.opList.Remove(req) + if removed { + atomic.CompareAndSwapPointer(&req.waitingIn, unsafe.Pointer(client), nil) + } + + if client.breaker.CompletionCallback(err) { + client.breaker.MarkSuccessful() + } else { + client.breaker.MarkFailure() + } + + return removed +} + +func (client *memdClient) SendRequest(req *memdQRequest) error { + if !client.breaker.AllowsRequest() { + logSchedf("Circuit breaker interrupting request. %s to %s OP=0x%x. 
Opaque=%d", client.conn.LocalAddr(), client.Address(), req.Command, req.Opaque) + + req.cancelWithCallback(errCircuitBreakerOpen) + + return nil + } + + return client.internalSendRequest(req) +} + +func (client *memdClient) internalSendRequest(req *memdQRequest) error { + addSuccess := client.takeRequestOwnership(req) + if !addSuccess { + return errRequestCanceled + } + + packet := &req.Packet + if client.SupportsFeature(memd.FeatureSnappy) { + isCompressed := (packet.Datatype & uint8(memd.DatatypeFlagCompressed)) != 0 + packetSize := len(packet.Value) + if !isCompressed && packetSize > client.compressionMinSize && isCompressibleOp(packet.Command) { + compressedValue := snappy.Encode(nil, packet.Value) + if float64(len(compressedValue))/float64(packetSize) <= client.compressionMinRatio { + newPacket := *packet + newPacket.Value = compressedValue + newPacket.Datatype = newPacket.Datatype | uint8(memd.DatatypeFlagCompressed) + packet = &newPacket + } + } + } + + logSchedf("Writing request. %s to %s OP=0x%x. Opaque=%d", client.conn.LocalAddr(), client.Address(), req.Command, req.Opaque) + + client.tracer.StartNetTrace(req) + + err := client.conn.WritePacket(packet) + if err != nil { + logDebugf("memdClient write failure: %v", err) + return err + } + + return nil +} + +func (client *memdClient) resolveRequest(resp *memdQResponse) { + opIndex := resp.Opaque + + logSchedf("Handling response data. OP=0x%x. Opaque=%d. Status:%d", resp.Command, resp.Opaque, resp.Status) + + client.lock.Lock() + // Find the request that goes with this response, don't check if the client is + // closed so that we can handle orphaned responses. + req := client.opList.FindAndMaybeRemove(opIndex, resp.Status != memd.StatusSuccess) + client.lock.Unlock() + + if req == nil { + // There is no known request that goes with this response. Ignore it. 
+ logDebugf("Received response with no corresponding request.") + if client.zombieLogger != nil { + client.zombieLogger.RecordZombieResponse(resp, client.connID, client.Address()) + } + return + } + if !req.Persistent || resp.Status != memd.StatusSuccess { + atomic.CompareAndSwapPointer(&req.waitingIn, unsafe.Pointer(client), nil) + } + + req.processingLock.Lock() + + if !req.Persistent { + stopNetTrace(req, resp, client.conn.LocalAddr(), client.conn.RemoteAddr()) + } + + isCompressed := (resp.Datatype & uint8(memd.DatatypeFlagCompressed)) != 0 + if isCompressed && !client.disableDecompression { + newValue, err := snappy.Decode(nil, resp.Value) + if err != nil { + req.processingLock.Unlock() + logDebugf("Failed to decompress value from the server for key `%s`.", req.Key) + return + } + + resp.Value = newValue + resp.Datatype = resp.Datatype & ^uint8(memd.DatatypeFlagCompressed) + } + + // Give the agent an opportunity to intercept the response first + var err error + if resp.Magic == memd.CmdMagicRes && resp.Status != memd.StatusSuccess { + err = getKvStatusCodeError(resp.Status) + } + + if client.breaker.CompletionCallback(err) { + client.breaker.MarkSuccessful() + } else { + client.breaker.MarkFailure() + } + + if !req.Persistent { + stopCmdTrace(req) + } + + req.processingLock.Unlock() + + if err != nil { + shortCircuited, routeErr := client.postErrHandler(resp, req, err) + if shortCircuited { + logSchedf("Routing callback intercepted response") + return + } + err = routeErr + } + + // Call the requests callback handler... + logSchedf("Dispatching response callback. OP=0x%x. Opaque=%d", resp.Command, resp.Opaque) + req.tryCallback(resp, err) +} + +func (client *memdClient) run() { + dcpBufferQ := make(chan *dcpBuffer) + dcpKillSwitch := make(chan bool) + dcpKillNotify := make(chan bool) + go func() { + procDcpItem := func(q *dcpBuffer, more bool) bool { + if !more { + dcpKillNotify <- true + return false + } + + logSchedf("Resolving response OP=0x%x. 
Opaque=%d", q.resp.Command, q.resp.Opaque) + client.resolveRequest(q.resp) + + // See below for information on MB-26363 for why this is here. + if !q.isInternal && client.dcpAckSize > 0 { + client.maybeSendDcpBufferAck(q.packetLen) + } + + return true + } + + for { + select { + case q, more := <-dcpBufferQ: + if !procDcpItem(q, more) { + return + } + case <-dcpKillSwitch: + close(dcpBufferQ) + } + } + }() + + go func() { + for { + packet, n, err := client.conn.ReadPacket() + if err != nil { + if !client.closed { + logErrorf("memdClient read failure: %v", err) + } + break + } + + resp := &memdQResponse{ + sourceAddr: client.conn.RemoteAddr(), + sourceConnID: client.connID, + Packet: *packet, + } + + atomic.StoreInt64(&client.lastActivity, time.Now().UnixNano()) + + // We handle DCP no-op's directly here so we can reply immediately. + if resp.Packet.Command == memd.CmdDcpNoop { + err := client.conn.WritePacket(&memd.Packet{ + Magic: memd.CmdMagicRes, + Command: memd.CmdDcpNoop, + Opaque: resp.Opaque, + }) + if err != nil { + logWarnf("Failed to dispatch DCP noop reply: %s", err) + } + continue + } + + // This is a fix for a bug in the server DCP implementation (MB-26363). This + // bug causes the server to fail to send a stream-end notification. The server + // does however synchronously stop the stream, and thus we can assume no more + // packets will be received following the close response. 
+ if resp.Magic == memd.CmdMagicRes && resp.Command == memd.CmdDcpCloseStream && client.streamEndNotSupported { + closeReq := client.opList.Find(resp.Opaque) + if closeReq != nil { + vbID := closeReq.Vbucket + streamReq := client.opList.FindOpenStream(vbID) + if streamReq != nil { + endExtras := make([]byte, 4) + binary.BigEndian.PutUint32(endExtras, uint32(memd.StreamEndClosed)) + endResp := &memdQResponse{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdDcpStreamEnd, + Vbucket: vbID, + Opaque: streamReq.Opaque, + Extras: endExtras, + }, + } + dcpBufferQ <- &dcpBuffer{ + resp: endResp, + packetLen: n, + isInternal: true, + } + } + } + } + + switch resp.Packet.Command { + case memd.CmdDcpDeletion: + fallthrough + case memd.CmdDcpExpiration: + fallthrough + case memd.CmdDcpMutation: + fallthrough + case memd.CmdDcpSnapshotMarker: + fallthrough + case memd.CmdDcpEvent: + fallthrough + case memd.CmdDcpOsoSnapshot: + fallthrough + case memd.CmdDcpSeqNoAdvanced: + fallthrough + case memd.CmdDcpStreamEnd: + dcpBufferQ <- &dcpBuffer{ + resp: resp, + packetLen: n, + } + continue + default: + logSchedf("Resolving response OP=0x%x. 
Opaque=%d", resp.Command, resp.Opaque) + client.resolveRequest(resp) + } + } + + client.lock.Lock() + if client.closed { + client.lock.Unlock() + } else { + client.closed = true + client.lock.Unlock() + + err := client.conn.Close() + if err != nil { + // Lets log an error, as this is non-fatal + logErrorf("Failed to shut down client connection (%s)", err) + } + } + + dcpKillSwitch <- true + <-dcpKillNotify + + client.opList.Drain(func(req *memdQRequest) { + if !atomic.CompareAndSwapPointer(&req.waitingIn, unsafe.Pointer(client), nil) { + logWarnf("Encountered an unowned request in a client opMap") + } + + shortCircuited, routeErr := client.postErrHandler(nil, req, io.EOF) + if shortCircuited { + return + } + + req.tryCallback(nil, routeErr) + }) + + close(client.closeNotify) + }() +} + +func (client *memdClient) LocalAddress() string { + return client.conn.LocalAddr() +} + +func (client *memdClient) Close() error { + client.lock.Lock() + client.closed = true + client.lock.Unlock() + + return client.conn.Close() +} + +func (client *memdClient) sendCanary() { + errChan := make(chan error) + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + errChan <- err + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdNoop, + Datatype: 0, + Cas: 0, + Key: nil, + Value: nil, + }, + Callback: handler, + RetryStrategy: newFailFastRetryStrategy(), + } + + logDebugf("Sending NOOP request for %p/%s", client, client.Address()) + err := client.internalSendRequest(req) + if err != nil { + client.breaker.MarkFailure() + } + + timer := AcquireTimer(client.breaker.CanaryTimeout()) + select { + case <-timer.C: + if !req.internalCancel(errRequestCanceled) { + err := <-errChan + if err == nil { + logDebugf("NOOP request successful for %p/%s", client, client.Address()) + client.breaker.MarkSuccessful() + } else { + logDebugf("NOOP request failed for %p/%s", client, client.Address()) + client.breaker.MarkFailure() + } + } + 
client.breaker.MarkFailure() + case err := <-errChan: + if err == nil { + client.breaker.MarkSuccessful() + } else { + client.breaker.MarkFailure() + } + } +} + +func (client *memdClient) helloFeatures(props helloProps) []memd.HelloFeature { + var features []memd.HelloFeature + + // Send the TLS flag, which has unknown effects. + features = append(features, memd.FeatureTLS) + + // Indicate that we understand XATTRs + features = append(features, memd.FeatureXattr) + + // Indicates that we understand select buckets. + features = append(features, memd.FeatureSelectBucket) + + // If the user wants to use KV Error maps, lets enable them + features = append(features, memd.FeatureXerror) + + // If the user wants to use mutation tokens, lets enable them + if props.MutationTokensEnabled { + features = append(features, memd.FeatureSeqNo) + } + + // If the user wants on-the-wire compression, lets try to enable it + if props.CompressionEnabled { + features = append(features, memd.FeatureSnappy) + } + + if props.DurationsEnabled { + features = append(features, memd.FeatureDurations) + } + + if props.CollectionsEnabled { + features = append(features, memd.FeatureCollections) + } + + if props.OutOfOrderEnabled { + features = append(features, memd.FeatureUnorderedExec) + } + + // These flags are informational so don't actually enable anything + // but the enhanced durability flag tells us if the server supports + // the feature + features = append(features, memd.FeatureAltRequests) + features = append(features, memd.FeatureSyncReplication) + features = append(features, memd.FeatureCreateAsDeleted) + + return features +} + +type helloProps struct { + MutationTokensEnabled bool + CollectionsEnabled bool + CompressionEnabled bool + DurationsEnabled bool + OutOfOrderEnabled bool +} + +type bootstrapProps struct { + Bucket string + UserAgent string + AuthMechanisms []AuthMechanism + AuthHandler authFuncHandler + ErrMapManager *errMapComponent + HelloProps helloProps +} + +type 
memdInitFunc func(*memdClient, time.Time) error + +func (client *memdClient) Bootstrap(settings bootstrapProps, deadline time.Time, cb memdInitFunc) error { + logDebugf("Fetching cluster client data") + + bucket := settings.Bucket + features := client.helloFeatures(settings.HelloProps) + clientInfoStr := clientInfoString(client.connID, settings.UserAgent) + authMechanisms := settings.AuthMechanisms + + helloCh, err := client.ExecHello(clientInfoStr, features, deadline) + if err != nil { + logDebugf("Failed to execute HELLO (%v)", err) + return err + } + + errMapCh, err := client.ExecGetErrorMap(1, deadline) + if err != nil { + // GetErrorMap isn't integral to bootstrap succeeding + logDebugf("Failed to execute Get error map (%v)", err) + } + + var listMechsCh chan SaslListMechsCompleted + firstAuthMethod := settings.AuthHandler(client, deadline, authMechanisms[0]) + // If the auth method is nil then we don't actually need to do any auth so no need to Get the mechanisms. + if firstAuthMethod != nil { + listMechsCh = make(chan SaslListMechsCompleted) + err = client.SaslListMechs(deadline, func(mechs []AuthMechanism, err error) { + if err != nil { + logDebugf("Failed to fetch list auth mechs (%v)", err) + } + listMechsCh <- SaslListMechsCompleted{ + Err: err, + Mechs: mechs, + } + }) + if err != nil { + logDebugf("Failed to execute list auth mechs (%v)", err) + } + } + + var completedAuthCh chan BytesAndError + var continueAuthCh chan bool + if firstAuthMethod != nil { + completedAuthCh, continueAuthCh, err = firstAuthMethod() + if err != nil { + logDebugf("Failed to execute auth (%v)", err) + return err + } + } + + var selectCh chan BytesAndError + if continueAuthCh == nil { + if bucket != "" { + selectCh, err = client.ExecSelectBucket([]byte(bucket), deadline) + if err != nil { + logDebugf("Failed to execute select bucket (%v)", err) + return err + } + } + } else { + selectCh = client.continueAfterAuth(bucket, continueAuthCh, deadline) + } + + helloResp := <-helloCh 
+ if helloResp.Err != nil { + logDebugf("Failed to hello with server (%v)", helloResp.Err) + return helloResp.Err + } + + errMapResp := <-errMapCh + if errMapResp.Err == nil { + settings.ErrMapManager.StoreErrorMap(errMapResp.Bytes) + } else { + logDebugf("Failed to fetch kv error map (%s)", errMapResp.Err) + } + + var serverAuthMechanisms []AuthMechanism + if listMechsCh != nil { + listMechsResp := <-listMechsCh + if listMechsResp.Err == nil { + serverAuthMechanisms = listMechsResp.Mechs + logDebugf("Server supported auth mechanisms: %v", serverAuthMechanisms) + } else { + logDebugf("Failed to fetch auth mechs from server (%v)", listMechsResp.Err) + } + } + + // If completedAuthCh isn't nil then we have attempted to do auth so we need to wait on the result of that. + if completedAuthCh != nil { + authResp := <-completedAuthCh + if authResp.Err != nil { + logDebugf("Failed to perform auth against server (%v)", authResp.Err) + // If there's an auth failure or there was only 1 mechanism to use then fail. + if len(authMechanisms) == 1 || errors.Is(authResp.Err, ErrAuthenticationFailure) { + return authResp.Err + } + + for { + var found bool + var mech AuthMechanism + found, mech, authMechanisms = findNextAuthMechanism(authMechanisms, serverAuthMechanisms) + if !found { + logDebugf("Failed to authenticate, all options exhausted") + return authResp.Err + } + + nextAuthFunc := settings.AuthHandler(client, deadline, mech) + if nextAuthFunc == nil { + // This can't really happen but just in case it somehow does. 
+ logDebugf("Failed to authenticate, no available credentials") + return authResp.Err + } + completedAuthCh, continueAuthCh, err = nextAuthFunc() + if err != nil { + logDebugf("Failed to execute auth (%v)", err) + return err + } + if continueAuthCh == nil { + if bucket != "" { + selectCh, err = client.ExecSelectBucket([]byte(bucket), deadline) + if err != nil { + logDebugf("Failed to execute select bucket (%v)", err) + return err + } + } + } else { + selectCh = client.continueAfterAuth(bucket, continueAuthCh, deadline) + } + authResp = <-completedAuthCh + if authResp.Err == nil { + break + } + + logDebugf("Failed to perform auth against server (%v)", authResp.Err) + if errors.Is(authResp.Err, ErrAuthenticationFailure) { + return authResp.Err + } + } + } + logDebugf("Authenticated successfully") + } + + if selectCh != nil { + selectResp := <-selectCh + if selectResp.Err != nil { + logDebugf("Failed to perform select bucket against server (%v)", selectResp.Err) + return selectResp.Err + } + } + + client.features = helloResp.SrvFeatures + + logDebugf("Client Features: %+v", features) + logDebugf("Server Features: %+v", client.features) + + for _, feature := range client.features { + client.conn.EnableFeature(feature) + } + + err = cb(client, deadline) + if err != nil { + return err + } + + return nil +} + +// BytesAndError contains the raw bytes of the result of an operation, and/or the error that occurred. 
+type BytesAndError struct { + Err error + Bytes []byte +} + +func (client *memdClient) SaslAuth(k, v []byte, deadline time.Time, cb func(b []byte, err error)) error { + err := client.doBootstrapRequest( + &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdSASLAuth, + Key: k, + Value: v, + }, + Callback: func(resp *memdQResponse, _ *memdQRequest, err error) { + // Auth is special, auth continue is surfaced as an error + var val []byte + if resp != nil { + val = resp.Value + } + + cb(val, err) + }, + RetryStrategy: newFailFastRetryStrategy(), + }, + deadline, + ) + if err != nil { + return err + } + + return nil +} + +func (client *memdClient) SaslStep(k, v []byte, deadline time.Time, cb func(err error)) error { + err := client.doBootstrapRequest( + &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdSASLStep, + Key: k, + Value: v, + }, + Callback: func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + cb(err) + return + } + + cb(nil) + }, + RetryStrategy: newFailFastRetryStrategy(), + }, + deadline, + ) + if err != nil { + return err + } + + return nil +} + +func (client *memdClient) ExecSelectBucket(b []byte, deadline time.Time) (chan BytesAndError, error) { + completedCh := make(chan BytesAndError, 1) + err := client.doBootstrapRequest( + &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdSelectBucket, + Key: b, + }, + Callback: func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + completedCh <- BytesAndError{ + Err: err, + } + return + } + + completedCh <- BytesAndError{ + Bytes: resp.Value, + } + }, + RetryStrategy: newFailFastRetryStrategy(), + }, + deadline, + ) + if err != nil { + return nil, err + } + + return completedCh, nil +} + +func (client *memdClient) ExecGetErrorMap(version uint16, deadline time.Time) (chan BytesAndError, error) { + completedCh := make(chan BytesAndError, 1) + valueBuf := make([]byte, 2) + 
binary.BigEndian.PutUint16(valueBuf, version) + + err := client.doBootstrapRequest( + &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdGetErrorMap, + Value: valueBuf, + }, + Callback: func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + completedCh <- BytesAndError{ + Err: err, + } + return + } + + completedCh <- BytesAndError{ + Bytes: resp.Value, + } + }, + RetryStrategy: newFailFastRetryStrategy(), + }, + deadline, + ) + if err != nil { + return nil, err + } + + return completedCh, nil +} + +func (client *memdClient) SaslListMechs(deadline time.Time, cb func(mechs []AuthMechanism, err error)) error { + err := client.doBootstrapRequest( + &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdSASLListMechs, + }, + Callback: func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + cb(nil, err) + return + } + + mechs := strings.Split(string(resp.Value), " ") + var authMechs []AuthMechanism + for _, mech := range mechs { + authMechs = append(authMechs, AuthMechanism(mech)) + } + + cb(authMechs, nil) + }, + RetryStrategy: newFailFastRetryStrategy(), + }, + deadline, + ) + if err != nil { + return err + } + + return nil +} + +// ExecHelloResponse contains the features and/or error from an ExecHello operation. 
+type ExecHelloResponse struct { + SrvFeatures []memd.HelloFeature + Err error +} + +func (client *memdClient) ExecHello(clientID string, features []memd.HelloFeature, deadline time.Time) (chan ExecHelloResponse, error) { + appendFeatureCode := func(bytes []byte, feature memd.HelloFeature) []byte { + bytes = append(bytes, 0, 0) + binary.BigEndian.PutUint16(bytes[len(bytes)-2:], uint16(feature)) + return bytes + } + + var featureBytes []byte + for _, feature := range features { + featureBytes = appendFeatureCode(featureBytes, feature) + } + + completedCh := make(chan ExecHelloResponse) + err := client.doBootstrapRequest( + &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdHello, + Key: []byte(clientID), + Value: featureBytes, + }, + Callback: func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + completedCh <- ExecHelloResponse{ + Err: err, + } + return + } + + var srvFeatures []memd.HelloFeature + for i := 0; i < len(resp.Value); i += 2 { + feature := binary.BigEndian.Uint16(resp.Value[i:]) + srvFeatures = append(srvFeatures, memd.HelloFeature(feature)) + } + + completedCh <- ExecHelloResponse{ + SrvFeatures: srvFeatures, + } + }, + RetryStrategy: newFailFastRetryStrategy(), + }, + deadline, + ) + if err != nil { + return nil, err + } + + return completedCh, nil +} + +func (client *memdClient) doBootstrapRequest(req *memdQRequest, deadline time.Time) error { + start := time.Now() + req.SetTimer(time.AfterFunc(deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: req.Command.Name(), + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + + err := 
client.SendRequest(req) + if err != nil { + return err + } + + return nil +} + +func (client *memdClient) continueAfterAuth(bucketName string, continueAuthCh chan bool, deadline time.Time) chan BytesAndError { + if bucketName == "" { + return nil + } + + selectCh := make(chan BytesAndError, 1) + go func() { + success := <-continueAuthCh + if !success { + selectCh <- BytesAndError{} + return + } + execCh, err := client.ExecSelectBucket([]byte(bucketName), deadline) + if err != nil { + logDebugf("Failed to execute select bucket (%v)", err) + selectCh <- BytesAndError{Err: err} + return + } + + execResp := <-execCh + selectCh <- execResp + }() + + return selectCh +} + +func checkSupportsFeature(srvFeatures []memd.HelloFeature, feature memd.HelloFeature) bool { + for _, srvFeature := range srvFeatures { + if srvFeature == feature { + return true + } + } + return false +} + +func findNextAuthMechanism(authMechanisms []AuthMechanism, serverAuthMechanisms []AuthMechanism) (bool, AuthMechanism, []AuthMechanism) { + for { + if len(authMechanisms) <= 1 { + break + } + authMechanisms = authMechanisms[1:] + mech := authMechanisms[0] + for _, serverMech := range serverAuthMechanisms { + if mech == serverMech { + return true, mech, authMechanisms + } + } + } + + return false, "", authMechanisms +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdclientdialer_component.go b/vendor/github.com/couchbase/gocbcore/v9/memdclientdialer_component.go new file mode 100644 index 000000000000..163f681ec655 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdclientdialer_component.go @@ -0,0 +1,134 @@ +package gocbcore + +import ( + "crypto/tls" + "sync" + "time" +) + +type memdClientDialerComponent struct { + kvConnectTimeout time.Duration + serverWaitTimeout time.Duration + clientID string + breakerCfg CircuitBreakerConfig + tlsConfig *dynTLSConfig + + compressionMinSize int + compressionMinRatio float64 + disableDecompression bool + + serverFailuresLock sync.Mutex + 
serverFailures map[string]time.Time + + tracer *tracerComponent + zombieLogger *zombieLoggerComponent + + bootstrapProps bootstrapProps + bootstrapCB memdInitFunc +} + +type memdClientDialerProps struct { + KVConnectTimeout time.Duration + ServerWaitTimeout time.Duration + ClientID string + TLSConfig *dynTLSConfig + CompressionMinSize int + CompressionMinRatio float64 + DisableDecompression bool +} + +func newMemdClientDialerComponent(props memdClientDialerProps, bSettings bootstrapProps, breakerCfg CircuitBreakerConfig, + zLogger *zombieLoggerComponent, tracer *tracerComponent, bootstrapCB memdInitFunc) *memdClientDialerComponent { + return &memdClientDialerComponent{ + kvConnectTimeout: props.KVConnectTimeout, + serverWaitTimeout: props.ServerWaitTimeout, + clientID: props.ClientID, + tlsConfig: props.TLSConfig, + breakerCfg: breakerCfg, + zombieLogger: zLogger, + tracer: tracer, + serverFailures: make(map[string]time.Time), + + bootstrapProps: bSettings, + bootstrapCB: bootstrapCB, + + compressionMinSize: props.CompressionMinSize, + compressionMinRatio: props.CompressionMinRatio, + disableDecompression: props.DisableDecompression, + } +} + +func (mcc *memdClientDialerComponent) SlowDialMemdClient(address string, postCompleteHandler postCompleteErrorHandler) (*memdClient, error) { + mcc.serverFailuresLock.Lock() + failureTime := mcc.serverFailures[address] + mcc.serverFailuresLock.Unlock() + + if !failureTime.IsZero() { + waitedTime := time.Since(failureTime) + if waitedTime < mcc.serverWaitTimeout { + time.Sleep(mcc.serverWaitTimeout - waitedTime) + } + } + + deadline := time.Now().Add(mcc.kvConnectTimeout) + client, err := mcc.dialMemdClient(address, deadline, postCompleteHandler) + if err != nil { + mcc.serverFailuresLock.Lock() + mcc.serverFailures[address] = time.Now() + mcc.serverFailuresLock.Unlock() + + return nil, err + } + + err = client.Bootstrap(mcc.bootstrapProps, deadline, mcc.bootstrapCB) + if err != nil { + closeErr := client.Close() + if closeErr 
!= nil { + logWarnf("Failed to close authentication client (%s)", closeErr) + } + mcc.serverFailuresLock.Lock() + mcc.serverFailures[address] = time.Now() + mcc.serverFailuresLock.Unlock() + + return nil, err + } + + return client, nil +} + +func (mcc *memdClientDialerComponent) dialMemdClient(address string, deadline time.Time, + postCompleteHandler postCompleteErrorHandler) (*memdClient, error) { + // Copy the tls configuration since we need to provide the hostname for each + // server that we connect to so that the certificate can be validated properly. + var tlsConfig *tls.Config + if mcc.tlsConfig != nil { + srvTLSConfig, err := mcc.tlsConfig.MakeForAddr(address) + if err != nil { + return nil, err + } + + tlsConfig = srvTLSConfig + } + + conn, err := dialMemdConn(address, tlsConfig, deadline) + if err != nil { + logDebugf("Failed to connect. %v", err) + return nil, err + } + + client := newMemdClient( + memdClientProps{ + ClientID: mcc.clientID, + DisableDecompression: mcc.disableDecompression, + CompressionMinRatio: mcc.compressionMinRatio, + CompressionMinSize: mcc.compressionMinSize, + }, + conn, + mcc.breakerCfg, + postCompleteHandler, + mcc.tracer, + mcc.zombieLogger, + ) + + return client, err +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdconn.go b/vendor/github.com/couchbase/gocbcore/v9/memdconn.go new file mode 100644 index 000000000000..82a94d9b4e4d --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdconn.go @@ -0,0 +1,97 @@ +package gocbcore + +import ( + "crypto/tls" + "io" + "net" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type memdConn interface { + LocalAddr() string + RemoteAddr() string + WritePacket(*memd.Packet) error + ReadPacket() (*memd.Packet, int, error) + Close() error + + EnableFeature(feature memd.HelloFeature) + IsFeatureEnabled(feature memd.HelloFeature) bool +} + +type memdConnWrap struct { + localAddr string + remoteAddr string + conn *memd.Conn + baseConn io.Closer +} + +func (s 
*memdConnWrap) LocalAddr() string { + return s.localAddr +} + +func (s *memdConnWrap) RemoteAddr() string { + return s.remoteAddr +} + +func (s *memdConnWrap) WritePacket(pkt *memd.Packet) error { + return s.conn.WritePacket(pkt) +} + +func (s *memdConnWrap) ReadPacket() (*memd.Packet, int, error) { + return s.conn.ReadPacket() +} + +func (s *memdConnWrap) EnableFeature(feature memd.HelloFeature) { + s.conn.EnableFeature(feature) +} + +func (s *memdConnWrap) IsFeatureEnabled(feature memd.HelloFeature) bool { + return s.conn.IsFeatureEnabled(feature) +} + +func (s *memdConnWrap) Close() error { + return s.baseConn.Close() +} + +func dialMemdConn(address string, tlsConfig *tls.Config, deadline time.Time) (memdConn, error) { + d := net.Dialer{ + Deadline: deadline, + } + + baseConn, err := d.Dial("tcp", address) + if err != nil { + return nil, err + } + + tcpConn, isTCPConn := baseConn.(*net.TCPConn) + if !isTCPConn || tcpConn == nil { + return nil, errCliInternalError + } + + err = tcpConn.SetNoDelay(false) + if err != nil { + logWarnf("Failed to disable TCP nodelay (%s)", err) + } + + var conn io.ReadWriteCloser + if tlsConfig == nil { + conn = tcpConn + } else { + tlsConn := tls.Client(tcpConn, tlsConfig) + err = tlsConn.Handshake() + if err != nil { + return nil, err + } + + conn = tlsConn + } + + return &memdConnWrap{ + conn: memd.NewConn(conn), + baseConn: conn, + localAddr: baseConn.LocalAddr().String(), + remoteAddr: address, + }, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdopmap.go b/vendor/github.com/couchbase/gocbcore/v9/memdopmap.go new file mode 100644 index 000000000000..cec4d558dbd1 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdopmap.go @@ -0,0 +1,139 @@ +package gocbcore + +import ( + "sync/atomic" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type memdOpMapItem struct { + value *memdQRequest + next *memdOpMapItem +} + +// This is used to store operations while they are pending +// a response from the server to 
allow mapping of a response +// opaque back to the originating request. This queue takes +// advantage of the monotonic nature of the opaque values +// and synchronous responses from the server to nearly always +// return the request without needing to iterate at all. +type memdOpMap struct { + opIndex uint32 + + first *memdOpMapItem + last *memdOpMapItem +} + +// Add a new request to the bottom of the op queue. +func (q *memdOpMap) Add(req *memdQRequest) { + q.opIndex++ + atomic.StoreUint32(&req.Opaque, q.opIndex) + + item := &memdOpMapItem{ + value: req, + next: nil, + } + + if q.last == nil { + q.first = item + q.last = item + } else { + q.last.next = item + q.last = item + } +} + +// Removes a request from the op queue. Expects to be passed +// the request to remove, along with the request that +// immediately precedes it in the queue. +func (q *memdOpMap) remove(prev *memdOpMapItem, req *memdOpMapItem) { + if prev == nil { + q.first = req.next + if q.first == nil { + q.last = nil + } + return + } + prev.next = req.next + if prev.next == nil { + q.last = prev + } +} + +// Removes a specific request from the op queue. +func (q *memdOpMap) Remove(req *memdQRequest) bool { + cur := q.first + var prev *memdOpMapItem + for cur != nil { + if cur.value == req { + q.remove(prev, cur) + return true + } + prev = cur + cur = cur.next + } + + return false +} + +// This allows searching through the list of requests for a specific +// request. This is only used by the DCP server bug fix for MB-26363. +func (q *memdOpMap) FindOpenStream(vbID uint16) *memdQRequest { + cur := q.first + for cur != nil { + if cur.value.Magic == memd.CmdMagicReq && + cur.value.Command == memd.CmdDcpStreamReq && + cur.value.Vbucket == vbID { + return cur.value + } + cur = cur.next + } + + return nil +} + +// Locates a request (searching FIFO-style) in the op queue using +// the opaque value that was assigned to it when it was dispatched. 
+func (q *memdOpMap) Find(opaque uint32) *memdQRequest { + cur := q.first + for cur != nil { + if cur.value.Opaque == opaque { + return cur.value + } + cur = cur.next + } + + return nil +} + +// Locates a request (searching FIFO-style) in the op queue using +// the opaque value that was assigned to it when it was dispatched. +// It then removes the request from the queue if it is not persistent +// or if alwaysRemove is set to true. +func (q *memdOpMap) FindAndMaybeRemove(opaque uint32, force bool) *memdQRequest { + cur := q.first + var prev *memdOpMapItem + for cur != nil { + if cur.value.Opaque == opaque { + if !cur.value.Persistent || force { + q.remove(prev, cur) + } + + return cur.value + } + prev = cur + cur = cur.next + } + + return nil +} + +// Clears the queue of all requests and calls the passed function +// once for each request found in the queue. +func (q *memdOpMap) Drain(cb func(*memdQRequest)) { + for cur := q.first; cur != nil; cur = cur.next { + cb(cur.value) + } + q.first = nil + q.last = nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdopqueue.go b/vendor/github.com/couchbase/gocbcore/v9/memdopqueue.go new file mode 100644 index 000000000000..016fba070d75 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdopqueue.go @@ -0,0 +1,189 @@ +package gocbcore + +import ( + "container/list" + "errors" + "fmt" + "sync" + "sync/atomic" + "unsafe" +) + +var ( + errOpQueueClosed = errors.New("queue is closed") + errOpQueueFull = errors.New("queue is full") + errAlreadyQueued = errors.New("request was already queued somewhere else") +) + +type memdOpConsumer struct { + parent *memdOpQueue + isClosed bool +} + +func (c *memdOpConsumer) Queue() *memdOpQueue { + return c.parent +} + +func (c *memdOpConsumer) Pop() *memdQRequest { + return c.parent.pop(c) +} + +func (c *memdOpConsumer) Close() { + c.parent.closeConsumer(c) +} + +type memdOpQueue struct { + lock sync.Mutex + signal *sync.Cond + items *list.List + isOpen bool +} + +func 
newMemdOpQueue() *memdOpQueue { + q := memdOpQueue{ + isOpen: true, + items: list.New(), + } + q.signal = sync.NewCond(&q.lock) + return &q +} + +// nolint: unused +func (q *memdOpQueue) debugString() string { + var outStr string + q.lock.Lock() + + outStr += fmt.Sprintf("Num Items: %d\n", q.items.Len()) + outStr += fmt.Sprintf("Is Open: %t", q.isOpen) + + q.lock.Unlock() + return outStr +} + +func (q *memdOpQueue) Remove(req *memdQRequest) bool { + q.lock.Lock() + + if !atomic.CompareAndSwapPointer(&req.queuedWith, unsafe.Pointer(q), nil) { + q.lock.Unlock() + return false + } + + for e := q.items.Front(); e != nil; e = e.Next() { + if e.Value.(*memdQRequest) == req { + q.items.Remove(e) + break + } + } + + q.lock.Unlock() + + return true +} + +func (q *memdOpQueue) Push(req *memdQRequest, maxItems int) error { + q.lock.Lock() + if !q.isOpen { + q.lock.Unlock() + return errOpQueueClosed + } + + if maxItems > 0 && q.items.Len() >= maxItems { + q.lock.Unlock() + return errOpQueueFull + } + + if !atomic.CompareAndSwapPointer(&req.queuedWith, nil, unsafe.Pointer(q)) { + q.lock.Unlock() + return errAlreadyQueued + } + + if req.isCancelled() { + atomic.CompareAndSwapPointer(&req.queuedWith, unsafe.Pointer(q), nil) + q.lock.Unlock() + + return errRequestCanceled + } + + q.items.PushBack(req) + q.lock.Unlock() + + q.signal.Broadcast() + return nil +} + +func (q *memdOpQueue) Consumer() *memdOpConsumer { + return &memdOpConsumer{ + parent: q, + isClosed: false, + } +} + +func (q *memdOpQueue) closeConsumer(c *memdOpConsumer) { + q.lock.Lock() + c.isClosed = true + q.lock.Unlock() + + q.signal.Broadcast() +} + +func (q *memdOpQueue) pop(c *memdOpConsumer) *memdQRequest { + q.lock.Lock() + + for q.isOpen && !c.isClosed && q.items.Len() == 0 { + q.signal.Wait() + } + + if !q.isOpen || c.isClosed { + q.lock.Unlock() + return nil + } + + e := q.items.Front() + q.items.Remove(e) + + req, ok := e.Value.(*memdQRequest) + if !ok { + logErrorf("Encountered incorrect type in 
memdOpQueue") + return q.pop(c) + } + + atomic.CompareAndSwapPointer(&req.queuedWith, unsafe.Pointer(q), nil) + + q.lock.Unlock() + + return req +} + +type drainCallback func(*memdQRequest) + +func (q *memdOpQueue) Drain(cb drainCallback) { + q.lock.Lock() + + if q.isOpen { + logErrorf("Attempted to Drain open memdOpQueue, ignoring") + q.lock.Unlock() + return + } + + for e := q.items.Front(); e != nil; e = e.Next() { + req, ok := e.Value.(*memdQRequest) + if !ok { + logErrorf("Encountered incorrect type in memdOpQueue") + continue + } + + atomic.CompareAndSwapPointer(&req.queuedWith, unsafe.Pointer(q), nil) + + cb(req) + } + + q.lock.Unlock() +} + +func (q *memdOpQueue) Close() { + q.lock.Lock() + q.isOpen = false + q.lock.Unlock() + + q.signal.Broadcast() +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdpipeline.go b/vendor/github.com/couchbase/gocbcore/v9/memdpipeline.go new file mode 100644 index 000000000000..78fe5fc9e759 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdpipeline.go @@ -0,0 +1,174 @@ +package gocbcore + +import ( + "errors" + "fmt" + "sync" +) + +var ( + errPipelineClosed = errors.New("pipeline has been closed") + errPipelineFull = errors.New("pipeline is too full") +) + +type memdGetClientFn func() (*memdClient, error) + +type memdPipeline struct { + address string + getClientFn memdGetClientFn + maxItems int + queue *memdOpQueue + maxClients int + clients []*memdPipelineClient + clientsLock sync.Mutex +} + +func newPipeline(address string, maxClients, maxItems int, getClientFn memdGetClientFn) *memdPipeline { + return &memdPipeline{ + address: address, + getClientFn: getClientFn, + maxClients: maxClients, + maxItems: maxItems, + queue: newMemdOpQueue(), + } +} + +func newDeadPipeline(maxItems int) *memdPipeline { + return newPipeline("", 0, maxItems, nil) +} + +// nolint: unused +func (pipeline *memdPipeline) debugString() string { + var outStr string + + if pipeline.address != "" { + outStr += fmt.Sprintf("Address: 
%s\n", pipeline.address) + outStr += fmt.Sprintf("Max Clients: %d\n", pipeline.maxClients) + outStr += fmt.Sprintf("Num Clients: %d\n", len(pipeline.clients)) + outStr += fmt.Sprintf("Max Items: %d\n", pipeline.maxItems) + } else { + outStr += "Dead-Server Queue\n" + } + + outStr += "Op Queue:\n" + outStr += reindentLog(" ", pipeline.queue.debugString()) + + return outStr +} + +func (pipeline *memdPipeline) Clients() []*memdPipelineClient { + pipeline.clientsLock.Lock() + defer pipeline.clientsLock.Unlock() + return pipeline.clients +} + +func (pipeline *memdPipeline) Address() string { + return pipeline.address +} + +func (pipeline *memdPipeline) StartClients() { + pipeline.clientsLock.Lock() + defer pipeline.clientsLock.Unlock() + + for len(pipeline.clients) < pipeline.maxClients { + client := newMemdPipelineClient(pipeline) + pipeline.clients = append(pipeline.clients, client) + + go client.Run() + } +} + +func (pipeline *memdPipeline) sendRequest(req *memdQRequest, maxItems int) error { + err := pipeline.queue.Push(req, maxItems) + if err == errOpQueueClosed { + return errPipelineClosed + } else if err == errOpQueueFull { + return errPipelineFull + } else if err != nil { + return err + } + + return nil +} + +func (pipeline *memdPipeline) RequeueRequest(req *memdQRequest) error { + return pipeline.sendRequest(req, 0) +} + +func (pipeline *memdPipeline) SendRequest(req *memdQRequest) error { + return pipeline.sendRequest(req, pipeline.maxItems) +} + +// Performs a takeover of another pipeline. Note that this does not +// take over the requests queued in the old pipeline, and those must +// be drained and processed separately. +func (pipeline *memdPipeline) Takeover(oldPipeline *memdPipeline) { + if oldPipeline.address != pipeline.address { + logErrorf("Attempted pipeline takeover for differing address") + + // We try to 'gracefully' error here by resolving all the requests as + // errors, but allowing the application to continue. 
+ err := oldPipeline.Close() + if err != nil { + // Log and continue with this non-fatal error. + logDebugf("Failed to shutdown old pipeline (%s)", err) + } + + // Drain all the requests as an internal error so they are not lost + oldPipeline.Drain(func(req *memdQRequest) { + req.tryCallback(nil, errCliInternalError) + }) + + return + } + + // Migrate all the clients to the new pipeline + oldPipeline.clientsLock.Lock() + clients := oldPipeline.clients + oldPipeline.clients = nil + oldPipeline.clientsLock.Unlock() + + pipeline.clientsLock.Lock() + pipeline.clients = clients + for _, client := range pipeline.clients { + client.ReassignTo(pipeline) + } + pipeline.clientsLock.Unlock() + + // Shut down the old pipelines queue, this will force all the + // clients to 'refresh' their consumer, and pick up the new + // pipeline queue from the new pipeline. This will also block + // any writers from sending new requests here if they have an + // out of date route config. + oldPipeline.queue.Close() +} + +func (pipeline *memdPipeline) Close() error { + // Shut down all the clients + pipeline.clientsLock.Lock() + clients := pipeline.clients + pipeline.clients = nil + pipeline.clientsLock.Unlock() + + hadErrors := false + for _, pipecli := range clients { + err := pipecli.Close() + if err != nil { + logErrorf("failed to shutdown pipeline client: %s", err) + hadErrors = true + } + } + + // Kill the queue, forcing everyone to stop + pipeline.queue.Close() + + if hadErrors { + return errCliInternalError + } + + return nil +} + +func (pipeline *memdPipeline) Drain(cb func(*memdQRequest)) { + pipeline.queue.Drain(cb) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdpipelineclient.go b/vendor/github.com/couchbase/gocbcore/v9/memdpipelineclient.go new file mode 100644 index 000000000000..5b3337b73203 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdpipelineclient.go @@ -0,0 +1,264 @@ +package gocbcore + +import ( + "errors" + "io" + "sync" + "sync/atomic" +) 
+ +type memdPipelineClient struct { + parent *memdPipeline + address string + client *memdClient + consumer *memdOpConsumer + lock sync.Mutex + closedSig chan struct{} + state uint32 + + connectError error +} + +func newMemdPipelineClient(parent *memdPipeline) *memdPipelineClient { + return &memdPipelineClient{ + parent: parent, + address: parent.address, + closedSig: make(chan struct{}), + state: uint32(EndpointStateDisconnected), + } +} + +func (pipecli *memdPipelineClient) State() EndpointState { + return EndpointState(atomic.LoadUint32(&pipecli.state)) +} + +func (pipecli *memdPipelineClient) Error() error { + pipecli.lock.Lock() + defer pipecli.lock.Unlock() + return pipecli.connectError +} + +func (pipecli *memdPipelineClient) ReassignTo(parent *memdPipeline) { + pipecli.lock.Lock() + pipecli.parent = parent + oldConsumer := pipecli.consumer + pipecli.consumer = nil + pipecli.lock.Unlock() + + if oldConsumer != nil { + oldConsumer.Close() + } +} + +func (pipecli *memdPipelineClient) ioLoop(client *memdClient) { + pipecli.lock.Lock() + if pipecli.parent == nil { + logDebugf("Pipeline client ioLoop started with no parent pipeline") + pipecli.lock.Unlock() + + err := client.Close() + if err != nil { + logErrorf("Failed to close client for shut down ioLoop (%s)", err) + } + + return + } + + pipecli.client = client + pipecli.lock.Unlock() + + killSig := make(chan struct{}) + + // This goroutine is responsible for monitoring the client and handling + // the cleanup whenever it shuts down. All cases of the client being + // shut down flow through this goroutine, even cases where we may already + // be aware that the client is shutdown, outside this scope. 
+ go func() { + logDebugf("Pipeline client `%s/%p` client watcher starting...", pipecli.address, pipecli) + + <-client.CloseNotify() + + logDebugf("Pipeline client `%s/%p` client died", pipecli.address, pipecli) + + pipecli.lock.Lock() + pipecli.client = nil + activeConsumer := pipecli.consumer + pipecli.consumer = nil + pipecli.lock.Unlock() + + logDebugf("Pipeline client `%s/%p` closing consumer %p", pipecli.address, pipecli, activeConsumer) + + // If we have a consumer, we need to close it to signal the loop below that + // something has happened. If there is no consumer, we don't need to signal + // as the loop below will already be in the process of fetching a new one, + // where it will inevitably detect the problem. + if activeConsumer != nil { + activeConsumer.Close() + } + + killSig <- struct{}{} + }() + + logDebugf("Pipeline client `%s/%p` IO loop starting...", pipecli.address, pipecli) + + var localConsumer *memdOpConsumer + for { + if localConsumer == nil { + logDebugf("Pipeline client `%s/%p` fetching new consumer", pipecli.address, pipecli) + + pipecli.lock.Lock() + + if pipecli.consumer != nil { + // If we still have an active consumer, lets close it to make room for the new one + pipecli.consumer.Close() + pipecli.consumer = nil + } + + if pipecli.client == nil { + // The client has disconnected from the server, this only occurs AFTER the watcher + // goroutine running above has detected the client is closed and has cleaned it up. 
+ pipecli.lock.Unlock() + break + } + + if pipecli.parent == nil { + // This pipelineClient has been shut down + logDebugf("Pipeline client `%s/%p` found no parent pipeline", pipecli.address, pipecli) + pipecli.lock.Unlock() + + // Close our client to force the watcher goroutine above to clean it up + err := client.Close() + if err != nil { + logErrorf("Pipeline client `%s/%p` failed to shut down client socket (%s)", pipecli.address, pipecli, err) + } + + break + } + + // Fetch a new consumer to use for this iteration + localConsumer = pipecli.parent.queue.Consumer() + pipecli.consumer = localConsumer + + pipecli.lock.Unlock() + } + + req := localConsumer.Pop() + if req == nil { + // Set the local consumer to null, this will force our normal logic to run + // which will clean up the original consumer and then attempt to acquire a + // new one if we are not being cleaned up. This is a minor code-optimization + // to avoid having to do a lock/unlock just to lock above anyways. It does + // have the downside of not being able to detect where we've looped around + // in error though. + localConsumer = nil + continue + } + + err := client.SendRequest(req) + if err != nil { + logDebugf("Pipeline client `%s/%p` encountered a socket write error: %v", pipecli.address, pipecli, err) + + if !errors.Is(err, io.EOF) { + // If we errored the write, and the client was not already closed, + // lets go ahead and close it. This will trigger the shutdown + // logic via the client watcher above. If the socket error was EOF + // we already did shut down, and the watcher should already be + // cleaning up. 
+ err := client.Close() + if err != nil { + logErrorf("Pipeline client `%s/%p` failed to shut down errored client socket (%s)", pipecli.address, pipecli, err) + } + } + + // Send this request upwards to be processed by the higher level processor + shortCircuited, routeErr := client.postErrHandler(nil, req, err) + if !shortCircuited { + client.CancelRequest(req, err) + req.tryCallback(nil, routeErr) + break + } + + // Stop looping + break + } + } + + atomic.StoreUint32(&pipecli.state, uint32(EndpointStateDisconnecting)) + logDebugf("Pipeline client `%s/%p` waiting for client shutdown", pipecli.address, pipecli) + + // We must wait for the close wait goroutine to die as well before we can continue. + <-killSig + + logDebugf("Pipeline client `%s/%p` received client shutdown notification", pipecli.address, pipecli) +} + +func (pipecli *memdPipelineClient) Run() { + for { + logDebugf("Pipeline Client `%s/%p` preparing for new client loop", pipecli.address, pipecli) + atomic.StoreUint32(&pipecli.state, uint32(EndpointStateConnecting)) + + pipecli.lock.Lock() + pipeline := pipecli.parent + pipecli.lock.Unlock() + + if pipeline == nil { + // If our pipeline is nil, it indicates that we need to shut down. 
+ logDebugf("Pipeline Client `%s/%p` is shutting down", pipecli.address, pipecli) + break + } + + logDebugf("Pipeline Client `%s/%p` retrieving new client connection for parent %p", pipecli.address, pipecli, pipeline) + client, err := pipeline.getClientFn() + if err != nil { + atomic.StoreUint32(&pipecli.state, uint32(EndpointStateDisconnected)) + pipecli.lock.Lock() + pipecli.connectError = err + pipecli.lock.Unlock() + continue + } + pipecli.lock.Lock() + pipecli.connectError = nil + pipecli.lock.Unlock() + atomic.StoreUint32(&pipecli.state, uint32(EndpointStateConnected)) + + // Runs until the connection has died (for whatever reason) + logDebugf("Pipeline Client `%s/%p` starting new client loop for %p", pipecli.address, pipecli, client) + pipecli.ioLoop(client) + } + + // Lets notify anyone who is watching that we are now shut down + close(pipecli.closedSig) +} + +// Close will close this pipeline client. Note that this method will not wait for +// everything to be cleaned up before returning. +func (pipecli *memdPipelineClient) Close() error { + logDebugf("Pipeline Client `%s/%p` received close request", pipecli.address, pipecli) + atomic.StoreUint32(&pipecli.state, uint32(EndpointStateDisconnecting)) + + // To shut down the client, we remove our reference to the parent. This + // causes our ioLoop see that we are being shut down and perform cleanup + // before exiting. + pipecli.lock.Lock() + pipecli.parent = nil + activeConsumer := pipecli.consumer + pipecli.consumer = nil + pipecli.lock.Unlock() + + // If we have an active consumer, we need to close it to cause the running + // ioLoop to unpause and pick up that our parent has been removed. Note + // that in some cases, we might not have an active consumer. This means + // that the ioLoop is about to try and fetch one, finding the missing + // parent in doing so. + if activeConsumer != nil { + activeConsumer.Close() + } + + // Lets wait till the ioLoop has shut everything down before returning. 
+ <-pipecli.closedSig + atomic.StoreUint32(&pipecli.state, uint32(EndpointStateDisconnected)) + + logDebugf("Pipeline Client `%s/%p` has exited", pipecli.address, pipecli) + + return nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdqpackets.go b/vendor/github.com/couchbase/gocbcore/v9/memdqpackets.go new file mode 100644 index 000000000000..9352bb01ef77 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdqpackets.go @@ -0,0 +1,252 @@ +package gocbcore + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "github.com/couchbase/gocbcore/v9/memd" +) + +// The data for a response from a server. This includes the +// packets data along with some useful meta-data related to +// the response. +type memdQResponse struct { + memd.Packet + + sourceAddr string + sourceConnID string +} + +type callback func(*memdQResponse, *memdQRequest, error) + +// The data for a request that can be queued with a memdqueueconn, +// and can potentially be rerouted to multiple servers due to +// configuration changes. +type memdQRequest struct { + memd.Packet + + // Static routing properties + ReplicaIdx int + Callback callback + Persistent bool + + // This tracks when the request was dispatched so that we can + // properly prioritize older requests to try and meet timeout + // requirements. + dispatchTime time.Time + + // This stores a pointer to the server that currently own + // this request. This allows us to remove it from that list + // whenever the request is cancelled. + queuedWith unsafe.Pointer + + // This stores a pointer to the opList that currently is holding + // this request. This allows us to remove it form that list + // whenever the request is cancelled + waitingIn unsafe.Pointer + + // This keeps track of whether the request has been 'completed' + // which is synonymous with the callback having been invoked. + // This is an integer to allow us to atomically control it. 
+ isCompleted uint32 + + // This is used to lock access to the request when processing + // a timeout, a response or spans + processingLock sync.Mutex + + // This stores the number of times that the item has been + // retried. It is used for various non-linear retry + // algorithms. + retryCount uint32 + + // This is used to determine what, if any, retry strategy to use + // when deciding whether to retry the request and calculating + // any back-off time period. + RetryStrategy RetryStrategy + + // This is the set of reasons why this request has been retried. + retryReasons []RetryReason + + // This is used to lock access to the request when processing + // retry reasons or attempts. + retryLock sync.Mutex + + // This is the timer which is used for cancellation of the request when deadlines are used. + timer atomic.Value + + // This stores a memdQRequestConnInfo value which is used to track connection information + // for the request. + connInfo atomic.Value + + RootTraceContext RequestSpanContext + cmdTraceSpan RequestSpan + netTraceSpan RequestSpan + + CollectionName string + ScopeName string +} + +type memdQRequestConnInfo struct { + lastDispatchedTo string + lastDispatchedFrom string + lastConnectionID string +} + +func (req *memdQRequest) RetryAttempts() uint32 { + req.retryLock.Lock() + defer req.retryLock.Unlock() + return req.retryCount +} + +func (req *memdQRequest) RetryReasons() []RetryReason { + req.retryLock.Lock() + defer req.retryLock.Unlock() + return req.retryReasons +} + +// Retries is here because we're locked into a publically exposed interface for RetryAttempts/RetryReasons. +// This function allows us to internally get count and reasons together preventing any races causing the count and +// reasons to mismatch. 
+func (req *memdQRequest) Retries() (uint32, []RetryReason) { + req.retryLock.Lock() + defer req.retryLock.Unlock() + return req.retryCount, req.retryReasons +} + +func (req *memdQRequest) retryStrategy() RetryStrategy { + return req.RetryStrategy +} + +func (req *memdQRequest) Identifier() string { + return fmt.Sprintf("0x%x", atomic.LoadUint32(&req.Opaque)) +} + +func (req *memdQRequest) Idempotent() bool { + _, ok := idempotentOps[req.Command] + return ok +} + +func (req *memdQRequest) ConnectionInfo() memdQRequestConnInfo { + p := req.connInfo.Load() + if p == nil { + return memdQRequestConnInfo{} + } + return p.(memdQRequestConnInfo) +} + +func (req *memdQRequest) SetConnectionInfo(info memdQRequestConnInfo) { + req.connInfo.Store(info) +} + +func (req *memdQRequest) SetTimer(t *time.Timer) { + req.timer.Store(t) +} + +func (req *memdQRequest) Timer() *time.Timer { + t := req.timer.Load() + if t == nil { + return nil + } + + return t.(*time.Timer) +} + +func (req *memdQRequest) recordRetryAttempt(retryReason RetryReason) { + req.retryLock.Lock() + defer req.retryLock.Unlock() + req.retryCount++ + found := false + for i := 0; i < len(req.retryReasons); i++ { + if req.retryReasons[i] == retryReason { + found = true + break + } + } + + // if idx is out of the range of retryReasons then it wasn't found. 
+ if !found { + req.retryReasons = append(req.retryReasons, retryReason) + } +} + +func (req *memdQRequest) tryCallback(resp *memdQResponse, err error) bool { + t := req.Timer() + if t != nil { + t.Stop() + } + + if req.Persistent { + if err != nil { + if req.internalCancel(err) { + req.Callback(resp, req, err) + return true + } + } else { + if atomic.LoadUint32(&req.isCompleted) == 0 { + req.Callback(resp, req, err) + return true + } + } + } else { + if atomic.SwapUint32(&req.isCompleted, 1) == 0 { + req.Callback(resp, req, err) + return true + } + } + + return false +} + +func (req *memdQRequest) isCancelled() bool { + return atomic.LoadUint32(&req.isCompleted) != 0 +} + +func (req *memdQRequest) internalCancel(err error) bool { + req.processingLock.Lock() + + if atomic.SwapUint32(&req.isCompleted, 1) != 0 { + // Someone already completed this request + req.processingLock.Unlock() + return false + } + + t := req.Timer() + if t != nil { + // This timer might have already fired and that's how we got here, however we might have also got here + // via other means so we should always try to stop it. + t.Stop() + } + + queuedWith := (*memdOpQueue)(atomic.LoadPointer(&req.queuedWith)) + if queuedWith != nil { + queuedWith.Remove(req) + } + + waitingIn := (*memdClient)(atomic.LoadPointer(&req.waitingIn)) + if waitingIn != nil { + waitingIn.CancelRequest(req, err) + } + + cancelReqTrace(req) + req.processingLock.Unlock() + + return true +} + +func (req *memdQRequest) cancelWithCallback(err error) { + // Try to perform the cancellation, if it succeeds, we call the + // callback immediately on the users behalf. + if req.internalCancel(err) { + req.Callback(nil, req, err) + } +} + +func (req *memdQRequest) Cancel() { + // Try to perform the cancellation, if it succeeds, we call the + // callback immediately on the users behalf. 
+ err := errRequestCanceled + req.cancelWithCallback(err) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/memdqsorter.go b/vendor/github.com/couchbase/gocbcore/v9/memdqsorter.go new file mode 100644 index 000000000000..b4b03f8fa723 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/memdqsorter.go @@ -0,0 +1,15 @@ +package gocbcore + +type memdQRequestSorter []*memdQRequest + +func (list memdQRequestSorter) Len() int { + return len(list) +} + +func (list memdQRequestSorter) Less(i, j int) bool { + return list[i].dispatchTime.Before(list[j].dispatchTime) +} + +func (list memdQRequestSorter) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/n1qlcomponent.go b/vendor/github.com/couchbase/gocbcore/v9/n1qlcomponent.go new file mode 100644 index 000000000000..3fde7f3f369a --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/n1qlcomponent.go @@ -0,0 +1,563 @@ +package gocbcore + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "io/ioutil" + "strings" + "sync" + "sync/atomic" + "time" +) + +// N1QLRowReader providers access to the rows of a n1ql query +type N1QLRowReader struct { + streamer *queryStreamer +} + +// NextRow reads the next rows bytes from the stream +func (q *N1QLRowReader) NextRow() []byte { + return q.streamer.NextRow() +} + +// Err returns any errors that occurred during streaming. +func (q N1QLRowReader) Err() error { + err := q.streamer.Err() + if err != nil { + return err + } + + meta, metaErr := q.streamer.MetaData() + if metaErr != nil { + return metaErr + } + + descs, err := parseN1QLError(bytes.NewReader(meta)) + if err != nil { + return &N1QLError{ + InnerError: err, + Errors: descs, + } + } + + return nil +} + +// MetaData fetches the non-row bytes streamed in the response. 
+func (q *N1QLRowReader) MetaData() ([]byte, error) { + return q.streamer.MetaData() +} + +// Close immediately shuts down the connection +func (q *N1QLRowReader) Close() error { + return q.streamer.Close() +} + +// PreparedName returns the name of the prepared statement created when using enhanced prepared statements. +// If the prepared name has not been seen on the stream then this will return an error. +// Volatile: This API is subject to change. +func (q N1QLRowReader) PreparedName() (string, error) { + val := q.streamer.EarlyMetadata("prepared") + if val == nil { + return "", wrapN1QLError(nil, "", errors.New("prepared name not found in metadata")) + } + + var name string + err := json.Unmarshal(val, &name) + if err != nil { + return "", wrapN1QLError(nil, "", errors.New("failed to parse prepared name")) + } + + return name, nil +} + +// N1QLQueryOptions represents the various options available for a n1ql query. +type N1QLQueryOptions struct { + Payload []byte + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. 
+ TraceContext RequestSpanContext +} + +func wrapN1QLError(req *httpRequest, statement string, err error) *N1QLError { + if err == nil { + err = errors.New("query error") + } + + ierr := &N1QLError{ + InnerError: err, + } + + if req != nil { + ierr.Endpoint = req.Endpoint + ierr.ClientContextID = req.UniqueID + ierr.RetryAttempts = req.RetryAttempts() + ierr.RetryReasons = req.RetryReasons() + } + + ierr.Statement = statement + + return ierr +} + +type jsonN1QLError struct { + Code uint32 `json:"code"` + Msg string `json:"msg"` +} + +type jsonN1QLErrorResponse struct { + Errors []jsonN1QLError +} + +func parseN1QLErrorResp(req *httpRequest, statement string, resp *HTTPResponse) *N1QLError { + errorDescs, err := parseN1QLError(resp.Body) + errOut := wrapN1QLError(req, statement, err) + errOut.Errors = errorDescs + return errOut +} + +func parseN1QLError(data io.Reader) ([]N1QLErrorDesc, error) { + var err error + var errorDescs []N1QLErrorDesc + + respBody, readErr := ioutil.ReadAll(data) + if readErr == nil { + var respParse jsonN1QLErrorResponse + parseErr := json.Unmarshal(respBody, &respParse) + if parseErr == nil { + + for _, jsonErr := range respParse.Errors { + errorDescs = append(errorDescs, N1QLErrorDesc{ + Code: jsonErr.Code, + Message: jsonErr.Msg, + }) + } + } + } + + if len(errorDescs) >= 1 { + firstErr := errorDescs[0] + errCode := firstErr.Code + errCodeGroup := errCode / 1000 + + if errCodeGroup == 4 { + err = errPlanningFailure + } + if errCodeGroup == 12 || errCodeGroup == 14 && errCode != 12004 && errCode != 12016 { + err = errIndexFailure + } + if errCode == 4040 || errCode == 4050 || errCode == 4060 || errCode == 4070 || errCode == 4080 || errCode == 4090 { + err = errPreparedStatementFailure + } + + if errCode == 3000 { + err = errParsingFailure + } + if errCode == 12009 { + err = errCasMismatch + } + if errCodeGroup == 5 { + err = errInternalServerFailure + } + if errCodeGroup == 10 { + err = errAuthenticationFailure + } + } + + return 
errorDescs, err +} + +type n1qlQueryComponent struct { + httpComponent httpComponentInterface + cfgMgr configManager + tracer *tracerComponent + + queryCache map[string]*n1qlQueryCacheEntry + cacheLock sync.RWMutex + + enhancedPreparedSupported uint32 +} + +type n1qlQueryCacheEntry struct { + enhanced bool + name string + encodedPlan string +} + +type n1qlJSONPrepData struct { + EncodedPlan string `json:"encoded_plan"` + Name string `json:"name"` +} + +func newN1QLQueryComponent(httpComponent httpComponentInterface, cfgMgr configManager, tracer *tracerComponent) *n1qlQueryComponent { + nqc := &n1qlQueryComponent{ + httpComponent: httpComponent, + cfgMgr: cfgMgr, + queryCache: make(map[string]*n1qlQueryCacheEntry), + tracer: tracer, + } + cfgMgr.AddConfigWatcher(nqc) + + return nqc +} + +func (nqc *n1qlQueryComponent) OnNewRouteConfig(cfg *routeConfig) { + if atomic.LoadUint32(&nqc.enhancedPreparedSupported) == 0 && + cfg.ContainsClusterCapability(1, "n1ql", "enhancedPreparedStatements") { + // Once supported this can't be unsupported + nqc.cacheLock.Lock() + nqc.queryCache = make(map[string]*n1qlQueryCacheEntry) + nqc.cacheLock.Unlock() + atomic.StoreUint32(&nqc.enhancedPreparedSupported, 1) + } +} + +// N1QLQuery executes a N1QL query +func (nqc *n1qlQueryComponent) N1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) { + tracer := nqc.tracer.CreateOpTrace("N1QLQuery", opts.TraceContext) + defer tracer.Finish() + + var payloadMap map[string]interface{} + err := json.Unmarshal(opts.Payload, &payloadMap) + if err != nil { + return nil, wrapN1QLError(nil, "", wrapError(err, "expected a JSON payload")) + } + + statement := getMapValueString(payloadMap, "statement", "") + clientContextID := getMapValueString(payloadMap, "client_context_id", "") + readOnly := getMapValueBool(payloadMap, "readonly", false) + + ctx, cancel := context.WithCancel(context.Background()) + ireq := &httpRequest{ + Service: N1qlService, + Method: "POST", + Path: 
"/query/service", + IsIdempotent: readOnly, + UniqueID: clientContextID, + Deadline: opts.Deadline, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + + go func() { + resp, err := nqc.execute(ireq, payloadMap, statement) + if err != nil { + cancel() + cb(nil, err) + return + } + + cb(resp, nil) + }() + + return ireq, nil +} + +// PreparedN1QLQuery executes a prepared N1QL query +func (nqc *n1qlQueryComponent) PreparedN1QLQuery(opts N1QLQueryOptions, cb N1QLQueryCallback) (PendingOp, error) { + tracer := nqc.tracer.CreateOpTrace("N1QLQuery", opts.TraceContext) + defer tracer.Finish() + + if atomic.LoadUint32(&nqc.enhancedPreparedSupported) == 1 { + return nqc.executeEnhPrepared(opts, tracer, cb) + } + + return nqc.executeOldPrepared(opts, tracer, cb) +} + +func (nqc *n1qlQueryComponent) executeEnhPrepared(opts N1QLQueryOptions, tracer *opTracer, cb N1QLQueryCallback) (PendingOp, error) { + var payloadMap map[string]interface{} + err := json.Unmarshal(opts.Payload, &payloadMap) + if err != nil { + return nil, wrapN1QLError(nil, "", wrapError(err, "expected a JSON payload")) + } + + statement := getMapValueString(payloadMap, "statement", "") + clientContextID := getMapValueString(payloadMap, "client_context_id", "") + readOnly := getMapValueBool(payloadMap, "readonly", false) + + nqc.cacheLock.RLock() + cachedStmt := nqc.queryCache[statement] + nqc.cacheLock.RUnlock() + + ctx, cancel := context.WithCancel(context.Background()) + parentReqForCancel := &httpRequest{ + Context: ctx, + CancelFunc: cancel, + } + + go func() { + if cachedStmt != nil { + // Attempt to execute our cached query plan + delete(payloadMap, "statement") + payloadMap["prepared"] = cachedStmt.name + + ireq := &httpRequest{ + Service: N1qlService, + Method: "POST", + Path: "/query/service", + IsIdempotent: readOnly, + UniqueID: clientContextID, + Deadline: opts.Deadline, + // We need to not retry this request. 
+ RetryStrategy: newFailFastRetryStrategy(), + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + + results, err := nqc.execute(ireq, payloadMap, statement) + if err == nil { + cb(results, nil) + return + } + // if we fail to send the prepared statement name then retry a PREPARE. + delete(payloadMap, "prepared") + } + + payloadMap["statement"] = "PREPARE " + statement + payloadMap["auto_execute"] = true + + ireq := &httpRequest{ + Service: N1qlService, + Method: "POST", + Path: "/query/service", + IsIdempotent: readOnly, + UniqueID: clientContextID, + Deadline: opts.Deadline, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + + results, err := nqc.execute(ireq, payloadMap, statement) + if err != nil { + cancel() + cb(nil, err) + return + } + + preparedName, err := results.PreparedName() + if err != nil { + logWarnf("Failed to read prepared name from result: %s", err) + cb(results, nil) + return + } + + cachedStmt = &n1qlQueryCacheEntry{} + cachedStmt.name = preparedName + cachedStmt.enhanced = true + + nqc.cacheLock.Lock() + nqc.queryCache[statement] = cachedStmt + nqc.cacheLock.Unlock() + + cb(results, nil) + }() + + return parentReqForCancel, nil +} + +func (nqc *n1qlQueryComponent) executeOldPrepared(opts N1QLQueryOptions, tracer *opTracer, cb N1QLQueryCallback) (PendingOp, error) { + var payloadMap map[string]interface{} + err := json.Unmarshal(opts.Payload, &payloadMap) + if err != nil { + return nil, wrapN1QLError(nil, "", wrapError(err, "expected a JSON payload")) + } + + statement := getMapValueString(payloadMap, "statement", "") + clientContextID := getMapValueString(payloadMap, "client_context_id", "") + readOnly := getMapValueBool(payloadMap, "readonly", false) + + nqc.cacheLock.RLock() + cachedStmt := nqc.queryCache[statement] + nqc.cacheLock.RUnlock() + + ctx, cancel := context.WithCancel(context.Background()) + parentReqForCancel := &httpRequest{ + 
Context: ctx, + CancelFunc: cancel, + } + + go func() { + if cachedStmt != nil { + // Attempt to execute our cached query plan + delete(payloadMap, "statement") + payloadMap["prepared"] = cachedStmt.name + payloadMap["encoded_plan"] = cachedStmt.encodedPlan + + ireq := &httpRequest{ + Service: N1qlService, + Method: "POST", + Path: "/query/service", + IsIdempotent: readOnly, + UniqueID: clientContextID, + Deadline: opts.Deadline, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + + results, err := nqc.execute(ireq, payloadMap, statement) + if err == nil { + cb(results, nil) + return + } + + // if we fail to send the prepared statement name then retry a PREPARE. + } + + delete(payloadMap, "prepared") + delete(payloadMap, "encoded_plan") + delete(payloadMap, "auto_execute") + prepStatement := "PREPARE " + statement + payloadMap["statement"] = prepStatement + + ireq := &httpRequest{ + Service: N1qlService, + Method: "POST", + Path: "/query/service", + IsIdempotent: readOnly, + UniqueID: clientContextID, + Deadline: opts.Deadline, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + + cacheRes, err := nqc.execute(ireq, payloadMap, statement) + if err != nil { + cancel() + cb(nil, err) + return + } + + b := cacheRes.NextRow() + if b == nil { + cancel() + cb(nil, wrapN1QLError(ireq, statement, errCliInternalError)) + return + } + + var prepData n1qlJSONPrepData + err = json.Unmarshal(b, &prepData) + if err != nil { + cancel() + cb(nil, wrapN1QLError(ireq, statement, err)) + return + } + + cachedStmt = &n1qlQueryCacheEntry{} + cachedStmt.name = prepData.Name + cachedStmt.encodedPlan = prepData.EncodedPlan + + nqc.cacheLock.Lock() + nqc.queryCache[statement] = cachedStmt + nqc.cacheLock.Unlock() + + // Attempt to execute our cached query plan + delete(payloadMap, "statement") + payloadMap["prepared"] = cachedStmt.name + 
payloadMap["encoded_plan"] = cachedStmt.encodedPlan + + resp, err := nqc.execute(ireq, payloadMap, statement) + if err != nil { + cancel() + cb(nil, err) + return + } + + cb(resp, nil) + }() + + return parentReqForCancel, nil +} + +func (nqc *n1qlQueryComponent) execute(ireq *httpRequest, payloadMap map[string]interface{}, statementForErr string) (*N1QLRowReader, error) { + start := time.Now() +ExecuteLoop: + for { + { // Produce an updated payload with the appropriate timeout + timeoutLeft := time.Until(ireq.Deadline) + payloadMap["timeout"] = timeoutLeft.String() + + newPayload, err := json.Marshal(payloadMap) + if err != nil { + return nil, wrapN1QLError(nil, "", wrapError(err, "failed to produce payload")) + } + ireq.Body = newPayload + } + + resp, err := nqc.httpComponent.DoInternalHTTPRequest(ireq, false) + if err != nil { + // execHTTPRequest will handle retrying due to in-flight socket close based + // on whether or not IsIdempotent is set on the httpRequest + return nil, wrapN1QLError(ireq, statementForErr, err) + } + + if resp.StatusCode != 200 { + n1qlErr := parseN1QLErrorResp(ireq, statementForErr, resp) + + var retryReason RetryReason + if len(n1qlErr.Errors) >= 1 { + firstErrDesc := n1qlErr.Errors[0] + + if firstErrDesc.Code == 4040 { + retryReason = QueryPreparedStatementFailureRetryReason + } else if firstErrDesc.Code == 4050 { + retryReason = QueryPreparedStatementFailureRetryReason + } else if firstErrDesc.Code == 4070 { + retryReason = QueryPreparedStatementFailureRetryReason + } else if strings.Contains(firstErrDesc.Message, "queryport.indexNotFound") { + retryReason = QueryIndexNotFoundRetryReason + } + } + + if retryReason == nil { + // n1qlErr is already wrapped here + return nil, n1qlErr + } + + shouldRetry, retryTime := retryOrchMaybeRetry(ireq, retryReason) + if !shouldRetry { + // n1qlErr is already wrapped here + return nil, n1qlErr + } + + select { + case <-time.After(time.Until(retryTime)): + continue ExecuteLoop + case 
<-time.After(time.Until(ireq.Deadline)): + err := &TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "N1QLQuery", + Opaque: ireq.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: ireq.retryReasons, + RetryAttempts: ireq.retryCount, + LastDispatchedTo: ireq.Endpoint, + } + return nil, wrapN1QLError(ireq, statementForErr, err) + } + } + + streamer, err := newQueryStreamer(resp.Body, "results") + if err != nil { + return nil, wrapN1QLError(ireq, statementForErr, err) + } + + return &N1QLRowReader{ + streamer: streamer, + }, nil + } +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/observecomponent.go b/vendor/github.com/couchbase/gocbcore/v9/observecomponent.go new file mode 100644 index 000000000000..f80aff10d008 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/observecomponent.go @@ -0,0 +1,256 @@ +package gocbcore + +import ( + "encoding/binary" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type bucketUtilsProvider interface { + KeyToVbucket(key []byte) (uint16, error) + BucketType() bucketType +} + +type observeComponent struct { + cidMgr *collectionsComponent + defaultRetryStrategy RetryStrategy + tracer *tracerComponent + bucketUtils bucketUtilsProvider +} + +func newObserveComponent(cidMgr *collectionsComponent, defaultRetryStrategy RetryStrategy, tracerCmpt *tracerComponent, + bucketUtils bucketUtilsProvider) *observeComponent { + return &observeComponent{ + cidMgr: cidMgr, + defaultRetryStrategy: defaultRetryStrategy, + tracer: tracerCmpt, + bucketUtils: bucketUtils, + } +} + +func (oc *observeComponent) Observe(opts ObserveOptions, cb ObserveCallback) (PendingOp, error) { + tracer := oc.tracer.CreateOpTrace("Observe", opts.TraceContext) + + if oc.bucketUtils.BucketType() != bktTypeCouchbase { + tracer.Finish() + return nil, errFeatureNotAvailable + } + + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + if 
len(resp.Value) < 4 { + tracer.Finish() + cb(nil, errProtocol) + return + } + keyLen := int(binary.BigEndian.Uint16(resp.Value[2:])) + + if len(resp.Value) != 2+2+keyLen+1+8 { + tracer.Finish() + cb(nil, errProtocol) + return + } + keyState := memd.KeyState(resp.Value[2+2+keyLen]) + cas := binary.BigEndian.Uint64(resp.Value[2+2+keyLen+1:]) + + tracer.Finish() + cb(&ObserveResult{ + KeyState: keyState, + Cas: Cas(cas), + }, nil) + } + + vbID, err := oc.bucketUtils.KeyToVbucket(opts.Key) + if err != nil { + return nil, err + } + keyLen := len(opts.Key) + + valueBuf := make([]byte, 2+2+keyLen) + binary.BigEndian.PutUint16(valueBuf[0:], vbID) + binary.BigEndian.PutUint16(valueBuf[2:], uint16(keyLen)) + copy(valueBuf[4:], opts.Key) + + if opts.RetryStrategy == nil { + opts.RetryStrategy = oc.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdObserve, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: nil, + Value: valueBuf, + Vbucket: vbID, + CollectionID: opts.CollectionID, + }, + ReplicaIdx: opts.ReplicaIdx, + Callback: handler, + RootTraceContext: tracer.RootContext(), + CollectionName: opts.CollectionName, + ScopeName: opts.ScopeName, + RetryStrategy: opts.RetryStrategy, + } + + op, err := oc.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "Unlock", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} + +func (oc *observeComponent) ObserveVb(opts ObserveVbOptions, cb 
ObserveVbCallback) (PendingOp, error) { + tracer := oc.tracer.CreateOpTrace("ObserveVb", nil) + + if oc.bucketUtils.BucketType() != bktTypeCouchbase { + tracer.Finish() + return nil, errFeatureNotAvailable + } + + handler := func(resp *memdQResponse, _ *memdQRequest, err error) { + if err != nil { + tracer.Finish() + cb(nil, err) + return + } + + if len(resp.Value) < 1 { + tracer.Finish() + cb(nil, errProtocol) + return + } + + formatType := resp.Value[0] + if formatType == 0 { + // Normal + if len(resp.Value) < 27 { + tracer.Finish() + cb(nil, errProtocol) + return + } + + vbID := binary.BigEndian.Uint16(resp.Value[1:]) + vbUUID := binary.BigEndian.Uint64(resp.Value[3:]) + persistSeqNo := binary.BigEndian.Uint64(resp.Value[11:]) + currentSeqNo := binary.BigEndian.Uint64(resp.Value[19:]) + + tracer.Finish() + cb(&ObserveVbResult{ + DidFailover: false, + VbID: vbID, + VbUUID: VbUUID(vbUUID), + PersistSeqNo: SeqNo(persistSeqNo), + CurrentSeqNo: SeqNo(currentSeqNo), + }, nil) + return + } else if formatType == 1 { + // Hard Failover + if len(resp.Value) < 43 { + cb(nil, errProtocol) + return + } + + vbID := binary.BigEndian.Uint16(resp.Value[1:]) + vbUUID := binary.BigEndian.Uint64(resp.Value[3:]) + persistSeqNo := binary.BigEndian.Uint64(resp.Value[11:]) + currentSeqNo := binary.BigEndian.Uint64(resp.Value[19:]) + oldVbUUID := binary.BigEndian.Uint64(resp.Value[27:]) + lastSeqNo := binary.BigEndian.Uint64(resp.Value[35:]) + + tracer.Finish() + cb(&ObserveVbResult{ + DidFailover: true, + VbID: vbID, + VbUUID: VbUUID(vbUUID), + PersistSeqNo: SeqNo(persistSeqNo), + CurrentSeqNo: SeqNo(currentSeqNo), + OldVbUUID: VbUUID(oldVbUUID), + LastSeqNo: SeqNo(lastSeqNo), + }, nil) + return + } else { + tracer.Finish() + cb(nil, errProtocol) + return + } + } + + valueBuf := make([]byte, 8) + binary.BigEndian.PutUint64(valueBuf[0:], uint64(opts.VbUUID)) + + if opts.RetryStrategy == nil { + opts.RetryStrategy = oc.defaultRetryStrategy + } + + req := &memdQRequest{ + Packet: 
memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdObserveSeqNo, + Datatype: 0, + Cas: 0, + Extras: nil, + Key: nil, + Value: valueBuf, + Vbucket: opts.VbID, + }, + ReplicaIdx: opts.ReplicaIdx, + Callback: handler, + RootTraceContext: tracer.RootContext(), + RetryStrategy: opts.RetryStrategy, + } + + op, err := oc.cidMgr.Dispatch(req) + if err != nil { + return nil, err + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "Unlock", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + return op, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/pendingop.go b/vendor/github.com/couchbase/gocbcore/v9/pendingop.go new file mode 100644 index 000000000000..c9271b6fed59 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/pendingop.go @@ -0,0 +1,31 @@ +package gocbcore + +import "sync/atomic" + +// PendingOp represents an outstanding operation within the client. +// This can be used to cancel an operation before it completes. +// This can also be used to Get information about the operation once +// it has completed (cancelled or successful). 
+type PendingOp interface { + Cancel() +} + +type multiPendingOp struct { + ops []PendingOp + completedOps uint32 + isIdempotent bool +} + +func (mp *multiPendingOp) Cancel() { + for _, op := range mp.ops { + op.Cancel() + } +} + +func (mp *multiPendingOp) CompletedOps() uint32 { + return atomic.LoadUint32(&mp.completedOps) +} + +func (mp *multiPendingOp) IncrementCompletedOps() uint32 { + return atomic.AddUint32(&mp.completedOps, 1) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/pipelinesnapshot.go b/vendor/github.com/couchbase/gocbcore/v9/pipelinesnapshot.go new file mode 100644 index 000000000000..a485a4b14554 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/pipelinesnapshot.go @@ -0,0 +1,40 @@ +package gocbcore + +type pipelineSnapshot struct { + state *kvMuxState + + idx int +} + +func (pi pipelineSnapshot) RevID() int64 { + return pi.state.revID +} + +func (pi pipelineSnapshot) NumPipelines() int { + return pi.state.NumPipelines() +} + +func (pi pipelineSnapshot) PipelineAt(idx int) *memdPipeline { + return pi.state.GetPipeline(idx) +} + +func (pi pipelineSnapshot) Iterate(offset int, cb func(*memdPipeline) bool) { + l := pi.state.NumPipelines() + pi.idx = offset + for iters := 0; iters < l; iters++ { + pi.idx = (pi.idx + 1) % l + p := pi.state.GetPipeline(pi.idx) + + if cb(p) { + return + } + } +} + +func (pi pipelineSnapshot) NodeByVbucket(vbID uint16, replicaID uint32) (int, error) { + if pi.state.vbMap == nil { + return 0, errUnsupportedOperation + } + + return pi.state.vbMap.NodeByVbucket(vbID, replicaID) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/pollercontroller.go b/vendor/github.com/couchbase/gocbcore/v9/pollercontroller.go new file mode 100644 index 000000000000..fae7afe63acf --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/pollercontroller.go @@ -0,0 +1,151 @@ +package gocbcore + +import ( + "errors" + "sync" +) + +type pollerController struct { + activeController configPollerController + controllerLock 
sync.Mutex + stopped bool + + cccpPoller *cccpConfigController + httpPoller *httpConfigController + cfgMgr configManager +} + +type configPollerController interface { + Pause(paused bool) + Done() chan struct{} + Stop() + Reset() + Error() error +} + +func newPollerController(cccpPoller *cccpConfigController, httpPoller *httpConfigController, cfgMgr configManager) *pollerController { + pc := &pollerController{ + cccpPoller: cccpPoller, + httpPoller: httpPoller, + cfgMgr: cfgMgr, + } + cfgMgr.AddConfigWatcher(pc) + + return pc +} + +// We listen out for the first config that comes in so that we (re)start the cccp if applicable. +func (pc *pollerController) OnNewRouteConfig(cfg *routeConfig) { + if cfg.bktType == bktTypeCouchbase || cfg.bktType == bktTypeMemcached { + pc.cfgMgr.RemoveConfigWatcher(pc) + } + + pc.controllerLock.Lock() + if cfg.bktType == bktTypeCouchbase && pc.activeController == pc.httpPoller { + logDebugf("Found couchbase bucket and HTTP poller in use. Resetting pollers to start cccp.") + pc.activeController = nil + pc.controllerLock.Unlock() + go func() { + pc.httpPoller.Stop() + pollerCh := pc.httpPoller.Done() + if pollerCh != nil { + <-pollerCh + } + pc.httpPoller.Reset() + pc.cccpPoller.Reset() + pc.Start() + }() + } else { + pc.controllerLock.Unlock() + } +} + +func (pc *pollerController) Start() { + pc.controllerLock.Lock() + if pc.stopped { + pc.controllerLock.Unlock() + return + } + + if pc.cccpPoller == nil { + pc.activeController = pc.httpPoller + pc.controllerLock.Unlock() + pc.httpPoller.DoLoop() + return + } + pc.activeController = pc.cccpPoller + pc.controllerLock.Unlock() + err := pc.cccpPoller.DoLoop() + if err != nil { + if pc.httpPoller == nil { + logErrorf("CCCP poller has exited for http fallback but no http poller is configured") + return + } + if isPollingFallbackError(err) { + pc.controllerLock.Lock() + // We can get into a weird race where the poller controller sent stop to the active controller but we then + // swap to a 
different one and so the Done() function never completes. + if pc.stopped { + pc.activeController = nil + pc.controllerLock.Unlock() + } else { + pc.activeController = pc.httpPoller + pc.controllerLock.Unlock() + pc.httpPoller.DoLoop() + } + } + } +} + +func (pc *pollerController) Pause(paused bool) { + pc.controllerLock.Lock() + controller := pc.activeController + pc.controllerLock.Unlock() + if controller != nil { + controller.Pause(paused) + } +} + +func (pc *pollerController) Stop() { + pc.controllerLock.Lock() + pc.stopped = true + controller := pc.activeController + pc.controllerLock.Unlock() + + if controller != nil { + controller.Stop() + } +} + +func (pc *pollerController) Done() chan struct{} { + pc.controllerLock.Lock() + controller := pc.activeController + pc.controllerLock.Unlock() + + if controller == nil { + return nil + } + return controller.Done() +} + +type pollerErrorProvider interface { + PollerError() error +} + +// If the underlying poller is currently in an error state then this will surface that error. +func (pc *pollerController) PollerError() error { + pc.controllerLock.Lock() + controller := pc.activeController + pc.controllerLock.Unlock() + + if controller == nil { + return nil + } + + return controller.Error() +} + +func isPollingFallbackError(err error) bool { + return errors.Is(err, ErrDocumentNotFound) || errors.Is(err, ErrUnsupportedOperation) || + errors.Is(err, errNoCCCPHosts) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/querystreamer.go b/vendor/github.com/couchbase/gocbcore/v9/querystreamer.go new file mode 100644 index 000000000000..3d19f8c4ae85 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/querystreamer.go @@ -0,0 +1,171 @@ +package gocbcore + +import ( + "encoding/json" + "errors" + "io" + "sync" +) + +// QueryResult allows access to the results of a N1QL query. 
+type queryStreamer struct { + metaDataBytes []byte + err error + lock sync.Mutex + + stream io.ReadCloser + streamer *rowStreamer +} + +func newQueryStreamer(stream io.ReadCloser, rowsAttrib string) (*queryStreamer, error) { + rowStreamer, err := newRowStreamer(stream, rowsAttrib) + if err != nil { + closeErr := stream.Close() + if closeErr != nil { + logDebugf("query stream close failed after error: %s", closeErr) + } + + return nil, err + } + + return &queryStreamer{ + stream: stream, + streamer: rowStreamer, + }, nil +} + +// Next assigns the next result from the results into the value pointer, returning whether the read was successful. +func (r *queryStreamer) NextRow() []byte { + rowBytes, err := r.streamer.NextRowBytes() + if err != nil { + r.finishWithError(err) + return nil + } + + // Check if there was any rows left + if rowBytes == nil { + r.finishWithoutError() + return nil + } + + return rowBytes +} + +// Err returns any errors that have occurred on the stream +func (r *queryStreamer) Err() error { + r.lock.Lock() + err := r.err + r.lock.Unlock() + + return err +} + +// EarlyMetadata returns the value (or nil) of an attribute from a query metadata before the query has completed. 
+func (r *queryStreamer) EarlyMetadata(key string) json.RawMessage { + return r.streamer.EarlyAttrib(key) +} + +func (r *queryStreamer) finishWithoutError() { + // Lets finalize the streamer so we Get the meta-data + metaDataBytes, err := r.streamer.Finalize() + if err != nil { + r.finishWithError(err) + return + } + + // Streamer is no longer valid now that its been Finalized + r.streamer = nil + + // Close the stream now that we are done with it + err = r.stream.Close() + if err != nil { + logWarnf("query stream close failed after meta-data: %s", err) + } + + // The stream itself is no longer valid + r.lock.Lock() + r.stream = nil + r.lock.Unlock() + + r.metaDataBytes = metaDataBytes +} + +func (r *queryStreamer) finishWithError(err error) { + // Lets record the error that happened + r.err = err + + // Our streamer is invalidated as soon as an error occurs + r.streamer = nil + + // Lets close the underlying stream + closeErr := r.stream.Close() + if closeErr != nil { + // We log this at debug level, but its almost always going to be an + // error since thats the most likely reason we are in finishWithError + logDebugf("query stream close failed after error: %s", closeErr) + } + + // The stream itself is now no longer valid + r.stream = nil +} + +// Close marks the results as closed, returning any errors that occurred during reading the results. +func (r *queryStreamer) Close() error { + // If an error occurred before, we should return that (forever) + err := r.Err() + if err != nil { + return err + } + + r.lock.Lock() + stream := r.stream + r.lock.Unlock() + + // If the stream is already closed, we can imply that no error occurred + if stream == nil { + return nil + } + + return stream.Close() +} + +// One assigns the first value from the results into the value pointer. +// It will close the results but not before iterating through all remaining +// results, as such this should only be used for very small resultsets - ideally +// of, at most, length 1. 
+func (r *queryStreamer) One() ([]byte, error) { + rowBytes := r.NextRow() + if rowBytes == nil { + if r.Err() == nil { + return nil, errors.New("no rows available") + } + + return nil, r.Close() + } + + // Read any remaining rows + for r.NextRow() != nil { + // skip + } + + // If an error occurred during the streaming, we need to + // return that, and make sure the result is closed + err := r.Err() + if err != nil { + return nil, err + } + + return rowBytes, nil +} + +func (r *queryStreamer) MetaData() ([]byte, error) { + if r.streamer != nil { + return nil, errors.New("the result must be closed before accessing the meta-data") + } + + if r.metaDataBytes == nil { + return nil, errors.New("an error occurred during querying which has made the meta-data unavailable") + } + + return r.metaDataBytes, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/retry.go b/vendor/github.com/couchbase/gocbcore/v9/retry.go new file mode 100644 index 000000000000..9332a9b70296 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/retry.go @@ -0,0 +1,305 @@ +package gocbcore + +import ( + "encoding/json" + "math" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +// RetryRequest is a request that can possibly be retried. +type RetryRequest interface { + RetryAttempts() uint32 + Identifier() string + Idempotent() bool + RetryReasons() []RetryReason + + retryStrategy() RetryStrategy + recordRetryAttempt(reason RetryReason) +} + +// RetryReason represents the reason for an operation possibly being retried. 
+type RetryReason interface { + AllowsNonIdempotentRetry() bool + AlwaysRetry() bool + Description() string +} + +type retryReason struct { + allowsNonIdempotentRetry bool + alwaysRetry bool + description string +} + +func (rr retryReason) AllowsNonIdempotentRetry() bool { + return rr.allowsNonIdempotentRetry +} + +func (rr retryReason) AlwaysRetry() bool { + return rr.alwaysRetry +} + +func (rr retryReason) Description() string { + return rr.description +} + +func (rr retryReason) String() string { + return rr.description +} + +func (rr retryReason) MarshalJSON() ([]byte, error) { + return json.Marshal(rr.description) +} + +var ( + // UnknownRetryReason indicates that the operation failed for an unknown reason. + UnknownRetryReason = retryReason{allowsNonIdempotentRetry: false, alwaysRetry: false, description: "UNKNOWN"} + + // SocketNotAvailableRetryReason indicates that the operation failed because the underlying socket was not available. + SocketNotAvailableRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "SOCKET_NOT_AVAILABLE"} + + // ServiceNotAvailableRetryReason indicates that the operation failed because the requested service was not available. + ServiceNotAvailableRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "SERVICE_NOT_AVAILABLE"} + + // NodeNotAvailableRetryReason indicates that the operation failed because the requested node was not available. + NodeNotAvailableRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "NODE_NOT_AVAILABLE"} + + // KVNotMyVBucketRetryReason indicates that the operation failed because it was sent to the wrong node for the vbucket. + KVNotMyVBucketRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: true, description: "KV_NOT_MY_VBUCKET"} + + // KVCollectionOutdatedRetryReason indicates that the operation failed because the collection ID on the request is outdated. 
+ KVCollectionOutdatedRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: true, description: "KV_COLLECTION_OUTDATED"} + + // KVErrMapRetryReason indicates that the operation failed for an unsupported reason but the KV error map indicated + // that the operation can be retried. + KVErrMapRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "KV_ERROR_MAP_RETRY_INDICATED"} + + // KVLockedRetryReason indicates that the operation failed because the document was locked. + KVLockedRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "KV_LOCKED"} + + // KVTemporaryFailureRetryReason indicates that the operation failed because of a temporary failure. + KVTemporaryFailureRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "KV_TEMPORARY_FAILURE"} + + // KVSyncWriteInProgressRetryReason indicates that the operation failed because a sync write is in progress. + KVSyncWriteInProgressRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "KV_SYNC_WRITE_IN_PROGRESS"} + + // KVSyncWriteRecommitInProgressRetryReason indicates that the operation failed because a sync write recommit is in progress. + KVSyncWriteRecommitInProgressRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "KV_SYNC_WRITE_RE_COMMIT_IN_PROGRESS"} + + // ServiceResponseCodeIndicatedRetryReason indicates that the operation failed and the service responded stating that + // the request should be retried. + ServiceResponseCodeIndicatedRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "SERVICE_RESPONSE_CODE_INDICATED"} + + // SocketCloseInFlightRetryReason indicates that the operation failed because the socket was closed whilst the operation + // was in flight. 
+ SocketCloseInFlightRetryReason = retryReason{allowsNonIdempotentRetry: false, alwaysRetry: false, description: "SOCKET_CLOSED_WHILE_IN_FLIGHT"} + + // PipelineOverloadedRetryReason indicates that the operation failed because the pipeline queue was full. + PipelineOverloadedRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: true, description: "PIPELINE_OVERLOADED"} + + // CircuitBreakerOpenRetryReason indicates that the operation failed because the circuit breaker for the underlying socket was open. + CircuitBreakerOpenRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "CIRCUIT_BREAKER_OPEN"} + + // QueryIndexNotFoundRetryReason indicates that the operation failed to to a missing query index + QueryIndexNotFoundRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "QUERY_INDEX_NOT_FOUND"} + + // QueryPreparedStatementFailureRetryReason indicates that the operation failed due to a prepared statement failure + QueryPreparedStatementFailureRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "QUERY_PREPARED_STATEMENT_FAILURE"} + + // AnalyticsTemporaryFailureRetryReason indicates that an analytics operation failed due to a temporary failure + AnalyticsTemporaryFailureRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "ANALYTICS_TEMPORARY_FAILURE"} + + // SearchTooManyRequestsRetryReason indicates that a search operation failed due to too many requests + SearchTooManyRequestsRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "SEARCH_TOO_MANY_REQUESTS"} + + // NotReadyRetryReason indicates that the WaitUntilReady operation is not ready + NotReadyRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: true, description: "NOT_READY"} + + // NoPipelineSnapshotRetryReason indicates that there was no pipeline snapshot available + 
NoPipelineSnapshotRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "NO_PIPELINE_SNAPSHOT"} + + // ConnectionErrorRetryReason indicates that there were errors reported by underlying connections + ConnectionErrorRetryReason = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: false, description: "CONNECTION_ERROR"} + + // MemdWriteFailure indicates that the operation failed because the write failed on the connection. + MemdWriteFailure = retryReason{allowsNonIdempotentRetry: true, alwaysRetry: true, description: "MEMD_WRITE_FAILURE"} +) + +// MaybeRetryRequest will possibly retry a request according to the strategy belonging to the request. +// It will use the reason to determine whether or not the failure reason is one that can be retried. +func (agent *Agent) MaybeRetryRequest(req RetryRequest, reason RetryReason) (bool, time.Time) { + return retryOrchMaybeRetry(req, reason) +} + +// RetryAction is used by a RetryStrategy to calculate the duration to wait before retrying an operation. +// Returning a value of 0 indicates to not retry. +type RetryAction interface { + Duration() time.Duration +} + +// NoRetryRetryAction represents an action that indicates to not retry. +type NoRetryRetryAction struct { +} + +// Duration is the length of time to wait before retrying an operation. +func (ra *NoRetryRetryAction) Duration() time.Duration { + return 0 +} + +// WithDurationRetryAction represents an action that indicates to retry with a given duration. +type WithDurationRetryAction struct { + WithDuration time.Duration +} + +// Duration is the length of time to wait before retrying an operation. +func (ra *WithDurationRetryAction) Duration() time.Duration { + return ra.WithDuration +} + +// RetryStrategy is to determine if an operation should be retried, and if so how long to wait before retrying. 
+type RetryStrategy interface { + RetryAfter(req RetryRequest, reason RetryReason) RetryAction +} + +// retryOrchMaybeRetry will possibly retry an operation according to the strategy belonging to the request. +// It will use the reason to determine whether or not the failure reason is one that can be retried. +func retryOrchMaybeRetry(req RetryRequest, reason RetryReason) (bool, time.Time) { + if reason.AlwaysRetry() { + duration := ControlledBackoff(req.RetryAttempts()) + logInfof("Will retry request. Backoff=%s, OperationID=%s. Reason=%s", duration, req.Identifier(), reason) + + req.recordRetryAttempt(reason) + + return true, time.Now().Add(duration) + } + + retryStrategy := req.retryStrategy() + if retryStrategy == nil { + return false, time.Time{} + } + + action := retryStrategy.RetryAfter(req, reason) + if action == nil { + logInfof("Won't retry request. OperationID=%s. Reason=%s", req.Identifier(), reason) + return false, time.Time{} + } + + duration := action.Duration() + if duration == 0 { + logInfof("Won't retry request. OperationID=%s. Reason=%s", req.Identifier(), reason) + return false, time.Time{} + } + + logInfof("Will retry request. Backoff=%s, OperationID=%s. Reason=%s", duration, req.Identifier(), reason) + req.recordRetryAttempt(reason) + + return true, time.Now().Add(duration) +} + +// failFastRetryStrategy represents a strategy that will never retry. +type failFastRetryStrategy struct { +} + +// newFailFastRetryStrategy returns a new FailFastRetryStrategy. +func newFailFastRetryStrategy() *failFastRetryStrategy { + return &failFastRetryStrategy{} +} + +// RetryAfter calculates and returns a RetryAction describing how long to wait before retrying an operation. +func (rs *failFastRetryStrategy) RetryAfter(req RetryRequest, reason RetryReason) RetryAction { + return &NoRetryRetryAction{} +} + +// BackoffCalculator is used by retry strategies to calculate backoff durations. 
+type BackoffCalculator func(retryAttempts uint32) time.Duration + +// BestEffortRetryStrategy represents a strategy that will keep retrying until it succeeds (or the caller times out +// the request). +type BestEffortRetryStrategy struct { + backoffCalculator BackoffCalculator +} + +// NewBestEffortRetryStrategy returns a new BestEffortRetryStrategy which will use the supplied calculator function +// to calculate retry durations. If calculator is nil then ControlledBackoff will be used. +func NewBestEffortRetryStrategy(calculator BackoffCalculator) *BestEffortRetryStrategy { + if calculator == nil { + calculator = ControlledBackoff + } + + return &BestEffortRetryStrategy{backoffCalculator: calculator} +} + +// RetryAfter calculates and returns a RetryAction describing how long to wait before retrying an operation. +func (rs *BestEffortRetryStrategy) RetryAfter(req RetryRequest, reason RetryReason) RetryAction { + if req.Idempotent() || reason.AllowsNonIdempotentRetry() { + return &WithDurationRetryAction{WithDuration: rs.backoffCalculator(req.RetryAttempts())} + } + + return &NoRetryRetryAction{} +} + +// ExponentialBackoff calculates a backoff time duration from the retry attempts on a given request. +func ExponentialBackoff(min, max time.Duration, backoffFactor float64) BackoffCalculator { + var minBackoff float64 = 1000000 // 1 Millisecond + var maxBackoff float64 = 500000000 // 500 Milliseconds + var factor float64 = 2 + + if min > 0 { + minBackoff = float64(min) + } + if max > 0 { + maxBackoff = float64(max) + } + if backoffFactor > 0 { + factor = backoffFactor + } + + return func(retryAttempts uint32) time.Duration { + backoff := minBackoff * (math.Pow(factor, float64(retryAttempts))) + + if backoff > maxBackoff { + backoff = maxBackoff + } + if backoff < minBackoff { + backoff = minBackoff + } + + return time.Duration(backoff) + } +} + +// ControlledBackoff calculates a backoff time duration from the retry attempts on a given request. 
+func ControlledBackoff(retryAttempts uint32) time.Duration { + switch retryAttempts { + case 0: + return 1 * time.Millisecond + case 1: + return 10 * time.Millisecond + case 2: + return 50 * time.Millisecond + case 3: + return 100 * time.Millisecond + case 4: + return 500 * time.Millisecond + default: + return 1000 * time.Millisecond + } +} + +var idempotentOps = map[memd.CmdCode]bool{ + memd.CmdGet: true, + memd.CmdGetReplica: true, + memd.CmdGetMeta: true, + memd.CmdSubDocGet: true, + memd.CmdSubDocExists: true, + memd.CmdSubDocGetCount: true, + memd.CmdNoop: true, + memd.CmdStat: true, + memd.CmdGetRandom: true, + memd.CmdCollectionsGetID: true, + memd.CmdCollectionsGetManifest: true, + memd.CmdGetClusterConfig: true, + memd.CmdObserve: true, + memd.CmdObserveSeqNo: true, +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/routeconfig.go b/vendor/github.com/couchbase/gocbcore/v9/routeconfig.go new file mode 100644 index 000000000000..60cdf46159cd --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/routeconfig.go @@ -0,0 +1,127 @@ +package gocbcore + +import "fmt" + +type routeConfig struct { + revID int64 + uuid string + name string + bktType bucketType + kvServerList []string + capiEpList []string + mgmtEpList []string + n1qlEpList []string + ftsEpList []string + cbasEpList []string + vbMap *vbucketMap + ketamaMap *ketamaContinuum + + clusterCapabilitiesVer []int + clusterCapabilities map[string][]string + + bucketCapabilities []string + bucketCapabilitiesVer string +} + +func (config *routeConfig) DebugString() string { + var outStr string + + outStr += fmt.Sprintf("Revision ID: %d\n", config.revID) + + outStr += "Capi Eps:\n" + for _, ep := range config.capiEpList { + outStr += fmt.Sprintf(" - %s\n", ep) + } + + outStr += "Mgmt Eps:\n" + for _, ep := range config.mgmtEpList { + outStr += fmt.Sprintf(" - %s\n", ep) + } + + outStr += "N1ql Eps:\n" + for _, ep := range config.n1qlEpList { + outStr += fmt.Sprintf(" - %s\n", ep) + } + + outStr += "FTS 
Eps:\n" + for _, ep := range config.ftsEpList { + outStr += fmt.Sprintf(" - %s\n", ep) + } + + outStr += "CBAS Eps:\n" + for _, ep := range config.cbasEpList { + outStr += fmt.Sprintf(" - %s\n", ep) + } + + if config.vbMap != nil { + outStr += "VBMap:\n" + outStr += fmt.Sprintf("%+v\n", config.vbMap) + } else { + outStr += "VBMap: not-used\n" + } + + if config.ketamaMap != nil { + outStr += "KetamaMap:\n" + outStr += fmt.Sprintf("%+v\n", config.ketamaMap) + } else { + outStr += "KetamaMap: not-used\n" + } + + // outStr += "Source Data: *" + //outStr += fmt.Sprintf(" Source Data: %v", rd.source) + + return outStr +} + +func (config *routeConfig) IsValid() bool { + if len(config.kvServerList) == 0 || len(config.mgmtEpList) == 0 { + return false + } + switch config.bktType { + case bktTypeCouchbase: + return config.vbMap != nil && config.vbMap.IsValid() + case bktTypeMemcached: + return config.ketamaMap != nil && config.ketamaMap.IsValid() + case bktTypeNone: + return true + default: + return false + } +} + +func (config *routeConfig) IsGCCCPConfig() bool { + return config.bktType == bktTypeNone +} + +func (config *routeConfig) ContainsClusterCapability(version int, category, capability string) bool { + caps := config.clusterCapabilities + capsVer := config.clusterCapabilitiesVer + if len(capsVer) == 0 || caps == nil { + return false + } + + if capsVer[0] == version { + for cat, catCapabilities := range caps { + switch cat { + case category: + for _, capa := range catCapabilities { + switch capa { + case capability: + return true + } + } + } + } + } + + return false +} + +func (config *routeConfig) ContainsBucketCapability(needleCap string) bool { + for _, capa := range config.bucketCapabilities { + if capa == needleCap { + return true + } + } + return false +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/rowstreamer.go b/vendor/github.com/couchbase/gocbcore/v9/rowstreamer.go new file mode 100644 index 000000000000..69bbea4bcd4b --- /dev/null +++ 
b/vendor/github.com/couchbase/gocbcore/v9/rowstreamer.go @@ -0,0 +1,218 @@ +package gocbcore + +import ( + "encoding/json" + "errors" + "io" +) + +type rowStreamState int + +const ( + rowStreamStateStart rowStreamState = 0 + rowStreamStateRows rowStreamState = 1 + rowStreamStatePostRows rowStreamState = 2 + rowStreamStateEnd rowStreamState = 3 +) + +type rowStreamer struct { + decoder *json.Decoder + rowsAttrib string + attribs map[string]json.RawMessage + state rowStreamState +} + +func newRowStreamer(stream io.Reader, rowsAttrib string) (*rowStreamer, error) { + decoder := json.NewDecoder(stream) + + streamer := &rowStreamer{ + decoder: decoder, + rowsAttrib: rowsAttrib, + attribs: make(map[string]json.RawMessage), + state: rowStreamStateStart, + } + + if err := streamer.begin(); err != nil { + return nil, err + } + + return streamer, nil +} + +func (s *rowStreamer) begin() error { + if s.state != rowStreamStateStart { + return errors.New("unexpected parsing state during begin") + } + + // Read the opening { for the result + t, err := s.decoder.Token() + if err != nil { + return err + } + if delim, ok := t.(json.Delim); !ok || delim != '{' { + return errors.New("expected an opening brace for the result") + } + + for { + if !s.decoder.More() { + // We reached the end of the object + s.state = rowStreamStateEnd + break + } + + // Read the attribute name + t, err = s.decoder.Token() + if err != nil { + return err + } + key, keyOk := t.(string) + if !keyOk { + return errors.New("expected an object property name") + } + + if key == s.rowsAttrib { + // Read the opening [ for the rows + t, err = s.decoder.Token() + if err != nil { + return err + } + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return errors.New("expected an opening bracket for the rows") + } + + s.state = rowStreamStateRows + break + } + + // Read the attribute value + var value json.RawMessage + err = s.decoder.Decode(&value) + if err != nil { + return err + } + + // Save the attribute for 
the meta-data + s.attribs[key] = value + } + + return nil +} + +func (s *rowStreamer) readRow() (json.RawMessage, error) { + if s.state < rowStreamStateRows { + return nil, errors.New("unexpected parsing state during readRow") + } + + // If we've already read all rows, we return nil + if s.state > rowStreamStateRows { + return nil, nil + } + + // If there are no more rows, mark the rows finished and + // return nil to signal that we are at the end + if !s.decoder.More() { + s.state = rowStreamStatePostRows + return nil, nil + } + + // Decode this row and return a raw message + var msg json.RawMessage + err := s.decoder.Decode(&msg) + if err != nil { + return nil, err + } + + return msg, nil +} + +func (s *rowStreamer) end() error { + if s.state < rowStreamStatePostRows { + return errors.New("unexpected parsing state during end") + } + + // Check if we've already read everything + if s.state > rowStreamStatePostRows { + return nil + } + + // Read the ending ] for the rows + t, err := s.decoder.Token() + if err != nil { + return err + } + if delim, ok := t.(json.Delim); !ok || delim != ']' { + return errors.New("expected an ending bracket for the rows") + } + + for { + if !s.decoder.More() { + // We reached the end of the object + s.state = rowStreamStateEnd + break + } + + // Read the attribute name + t, err := s.decoder.Token() + if err != nil { + return err + } + + key, keyOk := t.(string) + if !keyOk { + return errors.New("expected an object property name") + } + + // Read the attribute value + var value json.RawMessage + err = s.decoder.Decode(&value) + if err != nil { + return err + } + + // Save the attribute for the meta-data + s.attribs[key] = value + } + + return nil +} + +func (s *rowStreamer) NextRowBytes() (json.RawMessage, error) { + return s.readRow() +} + +func (s *rowStreamer) Finalize() (json.RawMessage, error) { + // Make sure we've read until the end of the object + for { + row, err := s.readRow() + if err != nil { + return nil, err + } + + if row 
== nil { + break + } + } + + // Read the rest of the result object + err := s.end() + if err != nil { + return nil, err + } + + // Reconstruct the non-rows JSON to a raw message + metaBytes, err := json.Marshal(s.attribs) + if err != nil { + return nil, err + } + + return json.RawMessage(metaBytes), nil +} + +func (s *rowStreamer) EarlyAttrib(key string) json.RawMessage { + val, ok := s.attribs[key] + if !ok { + return nil + } + + return val +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/scram/scramclient.go b/vendor/github.com/couchbase/gocbcore/v9/scram/scramclient.go new file mode 100644 index 000000000000..b4c01edfb76a --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/scram/scramclient.go @@ -0,0 +1,275 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// Copyright (c) 2017 - Couchbase Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gocbcore + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-{SHA-1,etc} client per RFC5802. +// http://tools.ietf.org/html/rfc5802 +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new instance of the SCRAM client. +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. +func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. +func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically out of crypto/rand on the first step. +func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. 
+// Step returns false if there are no errors and more data is +// still expected. +func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + + return !(c.step > 2 || c.err != nil) +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 6 + buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + if _, err := escaper.WriteString(&c.authMsg, c.user); err != nil { + return err + } + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-1 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-1 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-1 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-1 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, 
c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != nil { + return fmt.Errorf("cannot decode SCRAM-SHA-1 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2]) + } + if err := c.saltPassword(salt, iterCount); err != nil { + return err + } + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") + c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + proof, err := c.clientProof() + if err != nil { + return err + } + c.out.Write(proof) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-1 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-1 final message from server: %q", in) + } + + sig, err := c.serverSignature() + if err != nil { + return err + } + + if !bytes.Equal(sig, fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-1 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) error { + mac := hmac.New(c.newHash, []byte(c.pass)) + if _, err := mac.Write(salt); err != nil { + return err + } + if _, err := mac.Write([]byte{0, 0, 0, 1}); err != nil { + return err + } + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + if _, err := mac.Write(ui); err != nil { + return err + } + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi + return nil +} + +func (c *Client) clientProof() ([]byte, error) { + mac := 
hmac.New(c.newHash, c.saltedPass) + if _, err := mac.Write([]byte("Client Key")); err != nil { + return nil, err + } + clientKey := mac.Sum(nil) + hash := c.newHash() + if _, err := hash.Write(clientKey); err != nil { + return nil, err + } + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + if _, err := mac.Write(c.authMsg.Bytes()); err != nil { + return nil, err + } + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64, nil +} + +func (c *Client) serverSignature() ([]byte, error) { + mac := hmac.New(c.newHash, c.saltedPass) + if _, err := mac.Write([]byte("Server Key")); err != nil { + return nil, err + } + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + if _, err := mac.Write(c.authMsg.Bytes()); err != nil { + return nil, err + } + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/searchcomponent.go b/vendor/github.com/couchbase/gocbcore/v9/searchcomponent.go new file mode 100644 index 000000000000..3f27325fd567 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/searchcomponent.go @@ -0,0 +1,245 @@ +package gocbcore + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "strings" + "time" +) + +// SearchRowReader providers access to the rows of a view query +type SearchRowReader struct { + streamer *queryStreamer +} + +// NextRow reads the next rows bytes from the stream +func (q *SearchRowReader) NextRow() []byte { + return q.streamer.NextRow() +} + +// Err returns any errors that occurred during streaming. +func (q SearchRowReader) Err() error { + return q.streamer.Err() +} + +// MetaData fetches the non-row bytes streamed in the response. 
+func (q *SearchRowReader) MetaData() ([]byte, error) { + return q.streamer.MetaData() +} + +// Close immediately shuts down the connection +func (q *SearchRowReader) Close() error { + return q.streamer.Close() +} + +// SearchQueryOptions represents the various options available for a search query. +type SearchQueryOptions struct { + IndexName string + Payload []byte + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +type jsonSearchErrorResponse struct { + Status string +} + +func wrapSearchError(req *httpRequest, resp *HTTPResponse, indexName string, query interface{}, err error) *SearchError { + if err == nil { + err = errors.New("search error") + } + + ierr := &SearchError{ + InnerError: err, + } + + if req != nil { + ierr.Endpoint = req.Endpoint + ierr.RetryAttempts = req.RetryAttempts() + ierr.RetryReasons = req.RetryReasons() + } + + if resp != nil { + ierr.HTTPResponseCode = resp.StatusCode + } + + ierr.IndexName = indexName + ierr.Query = query + + return ierr +} + +func parseSearchError(req *httpRequest, indexName string, query interface{}, resp *HTTPResponse) *SearchError { + var err error + var errMsg string + + respBody, readErr := ioutil.ReadAll(resp.Body) + if readErr == nil { + var respParse jsonSearchErrorResponse + parseErr := json.Unmarshal(respBody, &respParse) + if parseErr == nil { + errMsg = respParse.Status + } + } + + if resp.StatusCode == 500 { + err = errInternalServerFailure + } + if resp.StatusCode == 401 || resp.StatusCode == 403 { + err = errAuthenticationFailure + } + if resp.StatusCode == 400 && strings.Contains(errMsg, "index not found") { + err = errIndexNotFound + } + + errOut := wrapSearchError(req, resp, indexName, query, err) + errOut.ErrorText = errMsg + return errOut +} + +type searchQueryComponent struct { + httpComponent *httpComponent + tracer *tracerComponent +} + +func newSearchQueryComponent(httpComponent *httpComponent, tracer 
*tracerComponent) *searchQueryComponent { + return &searchQueryComponent{ + httpComponent: httpComponent, + tracer: tracer, + } +} + +// SearchQuery executes a Search query +func (sqc *searchQueryComponent) SearchQuery(opts SearchQueryOptions, cb SearchQueryCallback) (PendingOp, error) { + tracer := sqc.tracer.CreateOpTrace("SearchQuery", opts.TraceContext) + defer tracer.Finish() + + var payloadMap map[string]interface{} + err := json.Unmarshal(opts.Payload, &payloadMap) + if err != nil { + return nil, wrapSearchError(nil, nil, "", nil, wrapError(err, "expected a JSON payload")) + } + + var ctlMap map[string]interface{} + if foundCtlMap, ok := payloadMap["ctl"]; ok { + if coercedCtlMap, ok := foundCtlMap.(map[string]interface{}); ok { + ctlMap = coercedCtlMap + } else { + return nil, wrapSearchError(nil, nil, "", nil, + wrapError(errInvalidArgument, "expected ctl to be a map")) + } + } else { + ctlMap = make(map[string]interface{}) + } + + indexName := opts.IndexName + query := payloadMap["query"] + + ctx, cancel := context.WithCancel(context.Background()) + reqURI := fmt.Sprintf("/api/index/%s/query", opts.IndexName) + ireq := &httpRequest{ + Service: FtsService, + Method: "POST", + Path: reqURI, + Body: opts.Payload, + IsIdempotent: true, + Deadline: opts.Deadline, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + start := time.Now() + + go func() { + ExecuteLoop: + for { + { // Produce an updated payload with the appropriate timeout + timeoutLeft := time.Until(ireq.Deadline) + + ctlMap["timeout"] = timeoutLeft / time.Millisecond + payloadMap["ctl"] = ctlMap + + newPayload, err := json.Marshal(payloadMap) + if err != nil { + cancel() + cb(nil, wrapSearchError(nil, nil, indexName, query, + wrapError(err, "failed to produce payload"))) + return + } + ireq.Body = newPayload + } + + resp, err := sqc.httpComponent.DoInternalHTTPRequest(ireq, false) + if err != nil { + cancel() + // 
execHTTPRequest will handle retrying due to in-flight socket close based + // on whether or not IsIdempotent is set on the httpRequest + cb(nil, wrapSearchError(ireq, nil, indexName, query, err)) + return + } + + if resp.StatusCode != 200 { + searchErr := parseSearchError(ireq, indexName, query, resp) + + var retryReason RetryReason + if searchErr.HTTPResponseCode == 429 { + retryReason = SearchTooManyRequestsRetryReason + } + + if retryReason == nil { + cancel() + // searchErr is already wrapped here + cb(nil, searchErr) + return + } + + shouldRetry, retryTime := retryOrchMaybeRetry(ireq, retryReason) + if !shouldRetry { + cancel() + // searchErr is already wrapped here + cb(nil, searchErr) + return + } + + select { + case <-time.After(time.Until(retryTime)): + continue ExecuteLoop + case <-time.After(time.Until(ireq.Deadline)): + cancel() + err := &TimeoutError{ + InnerError: errUnambiguousTimeout, + OperationID: "SearchQuery", + Opaque: ireq.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: ireq.retryReasons, + RetryAttempts: ireq.retryCount, + LastDispatchedTo: ireq.Endpoint, + } + cb(nil, wrapSearchError(ireq, nil, indexName, query, err)) + return + } + } + + streamer, err := newQueryStreamer(resp.Body, "hits") + if err != nil { + cancel() + cb(nil, wrapSearchError(ireq, resp, indexName, query, err)) + return + } + + cb(&SearchRowReader{ + streamer: streamer, + }, nil) + return + } + }() + + return ireq, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/statscomponent.go b/vendor/github.com/couchbase/gocbcore/v9/statscomponent.go new file mode 100644 index 000000000000..5ee79d4df084 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/statscomponent.go @@ -0,0 +1,207 @@ +package gocbcore + +import ( + "sync" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type statsComponent struct { + kvMux *kvMux + tracer *tracerComponent + defaultRetryStrategy RetryStrategy +} + +func newStatsComponent(kvMux *kvMux, defaultRetry 
RetryStrategy, tracer *tracerComponent) *statsComponent { + return &statsComponent{ + kvMux: kvMux, + tracer: tracer, + defaultRetryStrategy: defaultRetry, + } +} + +func (sc *statsComponent) Stats(opts StatsOptions, cb StatsCallback) (PendingOp, error) { + tracer := sc.tracer.CreateOpTrace("Stats", opts.TraceContext) + + iter, err := sc.kvMux.PipelineSnapshot() + if err != nil { + tracer.Finish() + return nil, err + } + + stats := make(map[string]SingleServerStats) + var statsLock sync.Mutex + + op := new(multiPendingOp) + op.isIdempotent = true + var expected uint32 + + pipelines := make([]*memdPipeline, 0) + + switch target := opts.Target.(type) { + case nil: + iter.Iterate(0, func(pipeline *memdPipeline) bool { + pipelines = append(pipelines, pipeline) + expected++ + return false + }) + case VBucketIDStatsTarget: + expected = 1 + + srvIdx, err := iter.NodeByVbucket(target.VbID, 0) + if err != nil { + return nil, err + } + + pipelines = append(pipelines, iter.PipelineAt(srvIdx)) + default: + return nil, errInvalidArgument + } + + opHandledLocked := func() { + completed := op.IncrementCompletedOps() + if expected-completed == 0 { + tracer.Finish() + cb(&StatsResult{ + Servers: stats, + }, nil) + } + } + + if opts.RetryStrategy == nil { + opts.RetryStrategy = sc.defaultRetryStrategy + } + + for _, pipeline := range pipelines { + serverAddress := pipeline.Address() + + handler := func(resp *memdQResponse, req *memdQRequest, err error) { + statsLock.Lock() + defer statsLock.Unlock() + + // Fetch the specific stats key for this server. Creating a new entry + // for the server if we did not previously have one. + curStats, ok := stats[serverAddress] + if !ok { + stats[serverAddress] = SingleServerStats{ + Stats: make(map[string]string), + } + curStats = stats[serverAddress] + } + + if err != nil { + // Store the first (and hopefully only) error into the Error field of this + // server's stats entry. 
+ if curStats.Error == nil { + curStats.Error = err + } else { + logDebugf("Got additional error for stats: %s: %v", serverAddress, err) + } + + opHandledLocked() + + return + } + + // Check if the key length is zero. This indicates that we have reached + // the ending of the stats listing by this server. + if len(resp.Key) == 0 { + // As this is a persistent request, we must manually cancel it to remove + // it from the pending ops list. To ensure we do not race multiple cancels, + // we only handle it as completed the one time cancellation succeeds. + if req.internalCancel(err) { + opHandledLocked() + } + + return + } + + // Add the stat for this server to the list of stats. + curStats.Stats[string(resp.Key)] = string(resp.Value) + } + + req := &memdQRequest{ + Packet: memd.Packet{ + Magic: memd.CmdMagicReq, + Command: memd.CmdStat, + Datatype: 0, + Cas: 0, + Key: []byte(opts.Key), + Value: nil, + }, + Persistent: true, + Callback: handler, + RootTraceContext: tracer.RootContext(), + RetryStrategy: opts.RetryStrategy, + } + + curOp, err := sc.kvMux.DispatchDirectToAddress(req, pipeline) + if err != nil { + statsLock.Lock() + stats[serverAddress] = SingleServerStats{ + Error: err, + } + opHandledLocked() + statsLock.Unlock() + + continue + } + + if !opts.Deadline.IsZero() { + start := time.Now() + req.SetTimer(time.AfterFunc(opts.Deadline.Sub(start), func() { + connInfo := req.ConnectionInfo() + count, reasons := req.Retries() + req.cancelWithCallback(&TimeoutError{ + InnerError: errAmbiguousTimeout, + OperationID: "Unlock", + Opaque: req.Identifier(), + TimeObserved: time.Since(start), + RetryReasons: reasons, + RetryAttempts: count, + LastDispatchedTo: connInfo.lastDispatchedTo, + LastDispatchedFrom: connInfo.lastDispatchedFrom, + LastConnectionID: connInfo.lastConnectionID, + }) + })) + } + + op.ops = append(op.ops, curOp) + } + + return op, nil +} + +// SingleServerStats represents the stats returned from a single server. 
+type SingleServerStats struct { + Stats map[string]string + Error error +} + +// StatsTarget is used for providing a specific target to the Stats operation. +type StatsTarget interface { +} + +// VBucketIDStatsTarget indicates that a specific vbucket should be targeted by the Stats operation. +type VBucketIDStatsTarget struct { + VbID uint16 +} + +// StatsOptions encapsulates the parameters for a Stats operation. +type StatsOptions struct { + Key string + // Target indicates that something specific should be targeted by the operation. If left nil + // then the stats command will be sent to all servers. + Target StatsTarget + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. + TraceContext RequestSpanContext +} + +// StatsResult encapsulates the result of a Stats operation. +type StatsResult struct { + Servers map[string]SingleServerStats +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/syncclient.go b/vendor/github.com/couchbase/gocbcore/v9/syncclient.go new file mode 100644 index 000000000000..d5e279153c1c --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/syncclient.go @@ -0,0 +1,157 @@ +package gocbcore + +import ( + "encoding/binary" + "fmt" + "time" + + "github.com/couchbase/gocbcore/v9/memd" +) + +type memdSenderClient interface { + SupportsFeature(memd.HelloFeature) bool + Address() string + SendRequest(*memdQRequest) error +} + +type syncClient struct { + client memdSenderClient +} + +func (client *syncClient) SupportsFeature(feature memd.HelloFeature) bool { + return client.client.SupportsFeature(feature) +} + +func (client *syncClient) Address() string { + return client.client.Address() +} + +func (client *syncClient) doRequest(req *memd.Packet, deadline time.Time) (respOut *memd.Packet, errOut error) { + signal := make(chan bool, 1) + + qreq := memdQRequest{ + Packet: *req, + Callback: func(resp *memdQResponse, _ *memdQRequest, err error) { + if resp != nil { + respOut = &resp.Packet + } + 
errOut = err + signal <- true + }, + RetryStrategy: newFailFastRetryStrategy(), + } + + err := client.client.SendRequest(&qreq) + if err != nil { + return nil, err + } + + timeoutTmr := AcquireTimer(time.Until(deadline)) + select { + case <-signal: + ReleaseTimer(timeoutTmr, false) + return + case <-timeoutTmr.C: + ReleaseTimer(timeoutTmr, true) + qreq.cancelWithCallback(errAmbiguousTimeout) + <-signal + return + } +} + +func (client *syncClient) doBasicOp(cmd memd.CmdCode, k, v, e []byte, deadline time.Time) ([]byte, error) { + resp, err := client.doRequest( + &memd.Packet{ + Magic: memd.CmdMagicReq, + Command: cmd, + Key: k, + Value: v, + Extras: e, + }, + deadline, + ) + + // We do it this way as the response value could still be useful even if an + // error status code is returned. For instance, StatusAuthContinue still + // contains authentication stepping information. + if resp == nil { + return nil, err + } + + return resp.Value, err +} + +func (client *syncClient) ExecDcpControl(key string, value string, deadline time.Time) error { + _, err := client.doBasicOp(memd.CmdDcpControl, []byte(key), []byte(value), nil, deadline) + return err +} + +func (client *syncClient) ExecGetClusterConfig(deadline time.Time) ([]byte, error) { + return client.doBasicOp(memd.CmdGetClusterConfig, nil, nil, nil, deadline) +} + +func (client *syncClient) ExecOpenDcpConsumer(streamName string, openFlags memd.DcpOpenFlag, deadline time.Time) error { + _, ok := client.client.(*memdClient) + if !ok { + return errCliInternalError + } + + extraBuf := make([]byte, 8) + binary.BigEndian.PutUint32(extraBuf[0:], 0) + binary.BigEndian.PutUint32(extraBuf[4:], uint32((openFlags & ^memd.DcpOpenFlag(3))|memd.DcpOpenFlagProducer)) + _, err := client.doBasicOp(memd.CmdDcpOpenConnection, []byte(streamName), nil, extraBuf, deadline) + return err +} + +func (client *syncClient) ExecEnableDcpNoop(period time.Duration, deadline time.Time) error { + _, ok := client.client.(*memdClient) + if !ok { + 
return errCliInternalError + } + // The client will always reply to No-Op's. No need to enable it + + err := client.ExecDcpControl("enable_noop", "true", deadline) + if err != nil { + return err + } + + periodStr := fmt.Sprintf("%d", period/time.Second) + err = client.ExecDcpControl("set_noop_interval", periodStr, deadline) + if err != nil { + return err + } + + return nil +} + +func (client *syncClient) ExecEnableDcpClientEnd(deadline time.Time) error { + memcli, ok := client.client.(*memdClient) + if !ok { + return errCliInternalError + } + + err := client.ExecDcpControl("send_stream_end_on_client_close_stream", "true", deadline) + if err != nil { + memcli.streamEndNotSupported = true + } + + return nil +} + +func (client *syncClient) ExecEnableDcpBufferAck(bufferSize int, deadline time.Time) error { + mclient, ok := client.client.(*memdClient) + if !ok { + return errCliInternalError + } + + // Enable buffer acknowledgment on the client + mclient.EnableDcpBufferAck(bufferSize / 2) + + bufferSizeStr := fmt.Sprintf("%d", bufferSize) + err := client.ExecDcpControl("connection_buffer_size", bufferSizeStr, deadline) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/timerpool.go b/vendor/github.com/couchbase/gocbcore/v9/timerpool.go new file mode 100644 index 000000000000..2e3dd288c0a4 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/timerpool.go @@ -0,0 +1,31 @@ +package gocbcore + +import ( + "sync" + "time" +) + +var globalTimerPool sync.Pool + +// AcquireTimer acquires a time from a global pool of timers maintained by the library. +func AcquireTimer(d time.Duration) *time.Timer { + tmr, isTmr := globalTimerPool.Get().(*time.Timer) + if tmr == nil || !isTmr { + if !isTmr && tmr != nil { + logErrorf("Encountered non-timer in timer pool") + } + + return time.NewTimer(d) + } + tmr.Reset(d) + return tmr +} + +// ReleaseTimer returns a timer to the global pool of timers maintained by the library. 
+func ReleaseTimer(t *time.Timer, wasRead bool) { + stopped := t.Stop() + if !wasRead && !stopped { + <-t.C + } + globalTimerPool.Put(t) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/tracing.go b/vendor/github.com/couchbase/gocbcore/v9/tracing.go new file mode 100644 index 000000000000..4e467719f8ad --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/tracing.go @@ -0,0 +1,193 @@ +package gocbcore + +import ( + "fmt" +) + +// RequestTracer describes the tracing abstraction in the SDK. +type RequestTracer interface { + StartSpan(operationName string, parentContext RequestSpanContext) RequestSpan +} + +// RequestSpan is the interface for spans that are created by a RequestTracer. +type RequestSpan interface { + Finish() + Context() RequestSpanContext + SetTag(key string, value interface{}) RequestSpan +} + +// RequestSpanContext is the interface for for external span contexts that can be passed in into the SDK option blocks. +type RequestSpanContext interface { +} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext = noopSpanContext{} + defaultNoopSpan = noopSpan{} +) + +type noopTracer struct { +} + +func (tracer noopTracer) StartSpan(operationName string, parentContext RequestSpanContext) RequestSpan { + return defaultNoopSpan +} + +func (span noopSpan) Finish() { +} + +func (span noopSpan) Context() RequestSpanContext { + return defaultNoopSpanContext +} + +func (span noopSpan) SetTag(key string, value interface{}) RequestSpan { + return defaultNoopSpan +} + +type opTracer struct { + parentContext RequestSpanContext + opSpan RequestSpan +} + +func (tracer *opTracer) Finish() { + if tracer.opSpan != nil { + tracer.opSpan.Finish() + } +} + +func (tracer *opTracer) RootContext() RequestSpanContext { + if tracer.opSpan != nil { + return tracer.opSpan.Context() + } + + return tracer.parentContext +} + +type tracerManager interface { + CreateOpTrace(operationName string, parentContext RequestSpanContext) *opTracer 
+ StartHTTPSpan(req *httpRequest, name string) RequestSpan + StartCmdTrace(req *memdQRequest) + StartNetTrace(req *memdQRequest) +} + +type tracerComponent struct { + tracer RequestTracer + bucket string + noRootTraceSpans bool +} + +func newTracerComponent(tracer RequestTracer, bucket string, noRootTraceSpans bool) *tracerComponent { + return &tracerComponent{ + tracer: tracer, + bucket: bucket, + noRootTraceSpans: noRootTraceSpans, + } +} + +func (tc *tracerComponent) CreateOpTrace(operationName string, parentContext RequestSpanContext) *opTracer { + if tc.noRootTraceSpans { + return &opTracer{ + parentContext: parentContext, + opSpan: nil, + } + } + + opSpan := tc.tracer.StartSpan(operationName, parentContext). + SetTag("component", "couchbase-go-sdk"). + SetTag("db.instance", tc.bucket). + SetTag("span.kind", "client") + + return &opTracer{ + parentContext: parentContext, + opSpan: opSpan, + } +} + +func (tc *tracerComponent) StartHTTPSpan(req *httpRequest, name string) RequestSpan { + return tc.tracer.StartSpan(name, req.RootTraceContext). + SetTag("retry", req.RetryAttempts()) +} + +func (tc *tracerComponent) StartCmdTrace(req *memdQRequest) { + if req.cmdTraceSpan != nil { + logWarnf("Attempted to start tracing on traced request") + return + } + + if req.RootTraceContext == nil { + return + } + + req.processingLock.Lock() + req.cmdTraceSpan = tc.tracer.StartSpan(req.Packet.Command.Name(), req.RootTraceContext). + SetTag("retry", req.RetryAttempts()) + + req.processingLock.Unlock() +} + +func (tc *tracerComponent) StartNetTrace(req *memdQRequest) { + if req.cmdTraceSpan == nil { + return + } + + if req.netTraceSpan != nil { + logWarnf("Attempted to start net tracing on traced request") + return + } + + req.processingLock.Lock() + req.netTraceSpan = tc.tracer.StartSpan("rpc", req.cmdTraceSpan.Context()). 
+ SetTag("span.kind", "client") + req.processingLock.Unlock() +} + +func stopCmdTrace(req *memdQRequest) { + if req.RootTraceContext == nil { + return + } + + if req.cmdTraceSpan == nil { + logWarnf("Attempted to stop tracing on untraced request") + return + } + + req.cmdTraceSpan.Finish() + req.cmdTraceSpan = nil +} + +func cancelReqTrace(req *memdQRequest) { + if req.cmdTraceSpan != nil { + if req.netTraceSpan != nil { + req.netTraceSpan.Finish() + } + + req.cmdTraceSpan.Finish() + } +} + +func stopNetTrace(req *memdQRequest, resp *memdQResponse, localAddress, remoteAddress string) { + if req.cmdTraceSpan == nil { + return + } + + if req.netTraceSpan == nil { + logWarnf("Attempted to stop net tracing on an untraced request") + return + } + + req.netTraceSpan.SetTag("couchbase.operation_id", fmt.Sprintf("0x%x", resp.Opaque)) + req.netTraceSpan.SetTag("couchbase.local_id", resp.sourceConnID) + if isLogRedactionLevelNone() { + req.netTraceSpan.SetTag("couchbase.document_key", string(req.Key)) + } + req.netTraceSpan.SetTag("local.address", localAddress) + req.netTraceSpan.SetTag("peer.address", remoteAddress) + if resp.Packet.ServerDurationFrame != nil { + req.netTraceSpan.SetTag("server_duration", resp.Packet.ServerDurationFrame.ServerDuration) + } + + req.netTraceSpan.Finish() + req.netTraceSpan = nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/util.go b/vendor/github.com/couchbase/gocbcore/v9/util.go new file mode 100644 index 000000000000..47317e4ca6bf --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/util.go @@ -0,0 +1,64 @@ +package gocbcore + +import ( + "crypto/rand" + "encoding/json" + "fmt" +) + +func getMapValueString(dict map[string]interface{}, key string, def string) string { + if dict != nil { + if val, ok := dict[key]; ok { + if valStr, ok := val.(string); ok { + return valStr + } + } + } + return def +} + +func getMapValueBool(dict map[string]interface{}, key string, def bool) bool { + if dict != nil { + if val, ok := dict[key]; 
ok { + if valStr, ok := val.(bool); ok { + return valStr + } + } + } + return def +} + +func randomCbUID() []byte { + out := make([]byte, 8) + _, err := rand.Read(out) + if err != nil { + logWarnf("Crypto read failed: %s", err) + } + return out +} + +func formatCbUID(data []byte) string { + return fmt.Sprintf("%02x%02x%02x%02x%02x%02x%02x%02x", + data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]) +} + +func clientInfoString(connID, userAgent string) string { + agentName := "gocbcore/" + goCbCoreVersionStr + if userAgent != "" { + agentName += " " + userAgent + } + + clientInfo := struct { + Agent string `json:"a"` + ConnID string `json:"i"` + }{ + Agent: agentName, + ConnID: connID, + } + clientInfoBytes, err := json.Marshal(clientInfo) + if err != nil { + logDebugf("Failed to generate client info string: %s", err) + } + + return string(clientInfoBytes) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/vbucketmap.go b/vendor/github.com/couchbase/gocbcore/v9/vbucketmap.go new file mode 100644 index 000000000000..fd9f114e7840 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/vbucketmap.go @@ -0,0 +1,85 @@ +package gocbcore + +type vbucketMap struct { + entries [][]int + numReplicas int +} + +func newVbucketMap(entries [][]int, numReplicas int) *vbucketMap { + vbMap := vbucketMap{ + entries: entries, + numReplicas: numReplicas, + } + return &vbMap +} + +func (vbMap vbucketMap) IsValid() bool { + return len(vbMap.entries) > 0 && len(vbMap.entries[0]) > 0 +} + +func (vbMap vbucketMap) NumVbuckets() int { + return len(vbMap.entries) +} + +func (vbMap vbucketMap) NumReplicas() int { + return vbMap.numReplicas +} + +func (vbMap vbucketMap) VbucketByKey(key []byte) uint16 { + return uint16(cbCrc(key) % uint32(len(vbMap.entries))) +} + +func (vbMap vbucketMap) NodeByVbucket(vbID uint16, replicaID uint32) (int, error) { + if vbID >= uint16(len(vbMap.entries)) { + return 0, errInvalidVBucket + } + + if replicaID >= 
uint32(len(vbMap.entries[vbID])) { + return 0, errInvalidReplica + } + + return vbMap.entries[vbID][replicaID], nil +} + +func (vbMap vbucketMap) VbucketsOnServer(index int) ([]uint16, error) { + vbList, err := vbMap.VbucketsByServer(0) + if err != nil { + return nil, err + } + + if len(vbList) <= index { + // Invalid server index + return nil, errInvalidReplica + } + + return vbList[index], nil +} + +func (vbMap vbucketMap) VbucketsByServer(replicaID int) ([][]uint16, error) { + var vbList [][]uint16 + + // We do not currently support listing for all replicas at once + if replicaID < 0 { + return nil, errInvalidReplica + } + + for vbID, entry := range vbMap.entries { + if len(entry) <= replicaID { + continue + } + + serverID := entry[replicaID] + + for len(vbList) <= serverID { + vbList = append(vbList, nil) + } + + vbList[serverID] = append(vbList[serverID], uint16(vbID)) + } + + return vbList, nil +} + +func (vbMap vbucketMap) NodeByKey(key []byte, replicaID uint32) (int, error) { + return vbMap.NodeByVbucket(vbMap.VbucketByKey(key), replicaID) +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/version.go b/vendor/github.com/couchbase/gocbcore/v9/version.go new file mode 100644 index 000000000000..4670585a57e0 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/version.go @@ -0,0 +1,6 @@ +package gocbcore + +// Version returns a string representation of the current SDK version. 
+func Version() string { + return goCbCoreVersionStr +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/viewscomponent.go b/vendor/github.com/couchbase/gocbcore/v9/viewscomponent.go new file mode 100644 index 000000000000..a79242103ae9 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/viewscomponent.go @@ -0,0 +1,187 @@ +package gocbcore + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "strings" + "time" +) + +// ViewQueryRowReader providers access to the rows of a view query +type ViewQueryRowReader struct { + streamer *queryStreamer +} + +// NextRow reads the next rows bytes from the stream +func (q *ViewQueryRowReader) NextRow() []byte { + return q.streamer.NextRow() +} + +// Err returns any errors that occurred during streaming. +func (q ViewQueryRowReader) Err() error { + return q.streamer.Err() +} + +// MetaData fetches the non-row bytes streamed in the response. +func (q *ViewQueryRowReader) MetaData() ([]byte, error) { + return q.streamer.MetaData() +} + +// Close immediately shuts down the connection +func (q *ViewQueryRowReader) Close() error { + return q.streamer.Close() +} + +// ViewQueryOptions represents the various options available for a view query. +type ViewQueryOptions struct { + DesignDocumentName string + ViewType string + ViewName string + Options url.Values + RetryStrategy RetryStrategy + Deadline time.Time + + // Volatile: Tracer API is subject to change. 
+ TraceContext RequestSpanContext +} + +func wrapViewQueryError(req *httpRequest, ddoc, view string, err error) *ViewError { + if err == nil { + err = errors.New("view error") + } + + ierr := &ViewError{ + InnerError: err, + } + + if req != nil { + ierr.Endpoint = req.Endpoint + ierr.RetryAttempts = req.RetryAttempts() + ierr.RetryReasons = req.RetryReasons() + } + + ierr.DesignDocumentName = ddoc + ierr.ViewName = view + + return ierr +} + +func parseViewQueryError(req *httpRequest, ddoc, view string, resp *HTTPResponse) *ViewError { + var err error + var errorDescs []ViewQueryErrorDesc + + respBody, readErr := ioutil.ReadAll(resp.Body) + if readErr == nil { + var errsMap map[string]string + var errsArr []string + + if err := json.Unmarshal(respBody, &errsArr); err != nil { + errorDescs = make([]ViewQueryErrorDesc, len(errsArr)) + for errIdx, errMessage := range errsArr { + errorDescs[errIdx] = ViewQueryErrorDesc{ + SourceNode: "", + Message: errMessage, + } + } + } else if err := json.Unmarshal(respBody, &errsMap); err != nil { + for errNode, errMessage := range errsMap { + errorDescs = append(errorDescs, ViewQueryErrorDesc{ + SourceNode: errNode, + Message: errMessage, + }) + } + } + } + + if resp.StatusCode == 401 { + err = errAuthenticationFailure + } else if resp.StatusCode == 404 { + err = errViewNotFound + } + + if len(errorDescs) >= 1 { + firstErrMsg := errorDescs[0].Message + + if strings.Contains(firstErrMsg, "not_found") { + err = errViewNotFound + } + } + + errOut := wrapViewQueryError(req, ddoc, view, err) + errOut.Errors = errorDescs + return errOut +} + +type viewQueryComponent struct { + httpComponent *httpComponent + tracer *tracerComponent +} + +func newViewQueryComponent(httpComponent *httpComponent, tracer *tracerComponent) *viewQueryComponent { + return &viewQueryComponent{ + httpComponent: httpComponent, + tracer: tracer, + } +} + +// ViewQuery executes a view query +func (vqc *viewQueryComponent) ViewQuery(opts ViewQueryOptions, cb 
ViewQueryCallback) (PendingOp, error) { + tracer := vqc.tracer.CreateOpTrace("ViewQuery", opts.TraceContext) + defer tracer.Finish() + + reqURI := fmt.Sprintf("/_design/%s/%s/%s?%s", + opts.DesignDocumentName, opts.ViewType, opts.ViewName, opts.Options.Encode()) + + ctx, cancel := context.WithCancel(context.Background()) + ireq := &httpRequest{ + Service: CapiService, + Method: "GET", + Path: reqURI, + IsIdempotent: true, + Deadline: opts.Deadline, + RetryStrategy: opts.RetryStrategy, + RootTraceContext: tracer.RootContext(), + Context: ctx, + CancelFunc: cancel, + } + + ddoc := opts.DesignDocumentName + view := opts.ViewName + + go func() { + resp, err := vqc.httpComponent.DoInternalHTTPRequest(ireq, false) + if err != nil { + cancel() + // execHTTPRequest will handle retrying due to in-flight socket close based + // on whether or not IsIdempotent is set on the httpRequest + cb(nil, wrapViewQueryError(ireq, ddoc, view, err)) + return + } + + if resp.StatusCode != 200 { + viewErr := parseViewQueryError(ireq, ddoc, view, resp) + + cancel() + // viewErr is already wrapped here + cb(nil, viewErr) + return + } + + streamer, err := newQueryStreamer(resp.Body, "rows") + if err != nil { + cancel() + cb(nil, wrapViewQueryError(ireq, ddoc, view, err)) + return + } + + cb(&ViewQueryRowReader{ + streamer: streamer, + }, nil) + }() + + return ireq, nil +} diff --git a/vendor/github.com/couchbase/gocbcore/v9/zombielogger_component.go b/vendor/github.com/couchbase/gocbcore/v9/zombielogger_component.go new file mode 100644 index 000000000000..dc14ab1cae77 --- /dev/null +++ b/vendor/github.com/couchbase/gocbcore/v9/zombielogger_component.go @@ -0,0 +1,169 @@ +package gocbcore + +import ( + "encoding/json" + "fmt" + "sort" + "sync" + "time" +) + +type zombieLogEntry struct { + connectionID string + operationID string + endpoint string + duration time.Duration + serviceType string +} + +type zombieLogItem struct { + ConnectionID string `json:"c"` + OperationID string `json:"i"` + 
Endpoint string `json:"r"` + ServerDurationUs uint64 `json:"d"` + ServiceType string `json:"s"` +} + +type zombieLogService struct { + Service string `json:"service"` + Count int `json:"count"` + Top []zombieLogItem `json:"top"` +} + +type zombieLoggerComponent struct { + zombieLock sync.RWMutex + zombieOps []*zombieLogEntry + interval time.Duration + sampleSize int + stopSig chan struct{} +} + +func newZombieLoggerComponent(interval time.Duration, sampleSize int) *zombieLoggerComponent { + return &zombieLoggerComponent{ + // zombieOps must have a static capacity for its lifetime, the capacity should + // never be altered so that it is consistent across the zombieLogger and + // recordZombieResponse. + zombieOps: make([]*zombieLogEntry, 0, sampleSize), + interval: interval, + sampleSize: sampleSize, + stopSig: make(chan struct{}), + } +} + +func (zlc *zombieLoggerComponent) Start() { + lastTick := time.Now() + + for { + select { + case <-zlc.stopSig: + return + case <-time.After(zlc.interval): + } + + lastTick = lastTick.Add(zlc.interval) + + // Preallocate space to copy the ops into... + oldOps := make([]*zombieLogEntry, zlc.sampleSize) + + zlc.zombieLock.Lock() + // Escape early if we have no ops to log... + if len(zlc.zombieOps) == 0 { + zlc.zombieLock.Unlock() + continue + } + + // Copy out our ops so we can cheaply print them out without blocking + // our ops from actually being recorded in other goroutines (which would + // effectively slow down the op pipeline for logging). 
+ oldOps = oldOps[0:len(zlc.zombieOps)] + copy(oldOps, zlc.zombieOps) + zlc.zombieOps = zlc.zombieOps[:0] + + zlc.zombieLock.Unlock() + + jsonData := zombieLogService{ + Service: "kv", + } + + for i := len(oldOps) - 1; i >= 0; i-- { + op := oldOps[i] + + jsonData.Top = append(jsonData.Top, zombieLogItem{ + OperationID: op.operationID, + ConnectionID: op.connectionID, + Endpoint: op.endpoint, + ServerDurationUs: uint64(op.duration / time.Microsecond), + ServiceType: op.serviceType, + }) + } + + jsonData.Count = len(jsonData.Top) + + jsonBytes, err := json.Marshal(jsonData) + if err != nil { + logDebugf("Failed to generate zombie logging JSON: %s", err) + } + + logWarnf("Orphaned responses observed:\n %s", jsonBytes) + } +} + +func (zlc *zombieLoggerComponent) Stop() { + close(zlc.stopSig) +} + +func (zlc *zombieLoggerComponent) RecordZombieResponse(resp *memdQResponse, connID, address string) { + entry := &zombieLogEntry{ + connectionID: connID, + operationID: fmt.Sprintf("0x%x", resp.Opaque), + endpoint: address, + duration: 0, + serviceType: fmt.Sprintf("kv:%s", resp.Command.Name()), + } + + if resp.Packet.ServerDurationFrame != nil { + entry.duration = resp.Packet.ServerDurationFrame.ServerDuration + } + + zlc.zombieLock.RLock() + + if cap(zlc.zombieOps) == 0 || (len(zlc.zombieOps) == cap(zlc.zombieOps) && + entry.duration < zlc.zombieOps[0].duration) { + // we are at capacity and we are faster than the fastest slow op or somehow in a state where capacity is 0. + zlc.zombieLock.RUnlock() + return + } + zlc.zombieLock.RUnlock() + + zlc.zombieLock.Lock() + if cap(zlc.zombieOps) == 0 || (len(zlc.zombieOps) == cap(zlc.zombieOps) && + entry.duration < zlc.zombieOps[0].duration) { + // we are at capacity and we are faster than the fastest slow op or somehow in a state where capacity is 0. 
+ zlc.zombieLock.Unlock() + return + } + + l := len(zlc.zombieOps) + i := sort.Search(l, func(i int) bool { return entry.duration < zlc.zombieOps[i].duration }) + + // i represents the slot where it should be inserted + + if len(zlc.zombieOps) < cap(zlc.zombieOps) { + if i == l { + zlc.zombieOps = append(zlc.zombieOps, entry) + } else { + zlc.zombieOps = append(zlc.zombieOps, nil) + copy(zlc.zombieOps[i+1:], zlc.zombieOps[i:]) + zlc.zombieOps[i] = entry + } + } else { + if i == 0 { + zlc.zombieOps[i] = entry + } else { + copy(zlc.zombieOps[0:i-1], zlc.zombieOps[1:i]) + zlc.zombieOps[i-1] = entry + } + } + + zlc.zombieLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml deleted file mode 100644 index 01c5dc219afa..000000000000 --- a/vendor/github.com/hashicorp/go-version/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.2 - - 1.3 - - 1.4 - - 1.9 - - "1.10" - - 1.11 - - 1.12 - -script: - - go test diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 6f3a15ce7721..851a337beb41 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,6 @@ # Versioning Library for Go -[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, and verifying versions against a set of constraints. 
go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 1032c5606c37..09703e8e6ff0 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -280,6 +280,10 @@ func comparePrereleases(v string, other string) int { // Equal tests if two versions are equal. func (v *Version) Equal(o *Version) bool { + if v == nil || o == nil { + return v == o + } + return v.Compare(o) == 0 } @@ -288,7 +292,7 @@ func (v *Version) GreaterThan(o *Version) bool { return v.Compare(o) > 0 } -// GreaterThanOrEqualTo tests if this version is greater than or equal to another version. +// GreaterThanOrEqual tests if this version is greater than or equal to another version. func (v *Version) GreaterThanOrEqual(o *Version) bool { return v.Compare(o) >= 0 } @@ -298,7 +302,7 @@ func (v *Version) LessThan(o *Version) bool { return v.Compare(o) < 0 } -// LessThanOrEqualTo tests if this version is less than or equal to another version. +// LessThanOrEqual tests if this version is less than or equal to another version. func (v *Version) LessThanOrEqual(o *Version) bool { return v.Compare(o) <= 0 } diff --git a/vendor/github.com/hashicorp/vault-plugin-database-couchbase/LICENSE b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/LICENSE new file mode 100644 index 000000000000..a612ad9813b0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. 
"Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/vault-plugin-database-couchbase/Makefile b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/Makefile new file mode 100644 index 000000000000..237163ccd27e --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/Makefile @@ -0,0 +1,38 @@ +TOOL?=vault-plugin-database-couchbase +TEST?=$$(go list ./... | grep -v /vendor/ | grep -v teamcity) +VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr +BUILD_TAGS?=${TOOL} +GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor) +GO_TEST_CMD?=go test -v + +# bin generates the releaseable binaries for this plugin +bin: fmtcheck + @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'" + +default: dev + +# dev starts up `vault` from your $PATH, then builds the couchbase +# plugin, registers it with vault and enables it. +# A ./tmp dir is created for configs and binaries, and cleaned up on exit. 
+dev: fmtcheck + @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" + +# test runs the unit tests and vets the code +test: fmtcheck + CGO_ENABLED=0 VAULT_TOKEN= ${GO_TEST_CMD} -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -count=1 -timeout=5m -parallel=4 + +testacc: fmtcheck + CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC=1 ${GO_TEST_CMD} -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -count=1 -timeout=20m + +testcompile: fmtcheck + @for pkg in $(TEST) ; do \ + go test -v -c -tags='$(BUILD_TAGS)' $$pkg ; \ + done + +fmtcheck: + @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" + +fmt: + gofmt -w $(GOFMT_FILES) + +.PHONY: bin default dev test testcompile fmtcheck fmt \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault-plugin-database-couchbase/README.md b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/README.md new file mode 100644 index 000000000000..49346f996e16 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/README.md @@ -0,0 +1,146 @@ +# vault-plugin-database-couchbase + +[![CircleCI](https://circleci.com/gh/hashicorp/vault-plugin-database-couchbase.svg?style=svg)](https://circleci.com/gh/hashicorp/vault-plugin-database-couchbase) + +A [Vault](https://www.vaultproject.io) plugin for Couchbase + +This project uses the database plugin interface introduced in Vault version 0.7.1. + +The plugin supports the generation of static and dynamic user roles and root credential rotation. + +## Build + +For Linux/AMD64, pre-built binaries can be found at [the releases page](https://releases.hashicorp.com/vault-plugin-database-couchbase/) (built with the Couchbase Go SDK version 2.1.1) + +For other platforms, there are not currently pre-built binaries available. + +To build this package for any platform you will need to clone this repository and cd into the repo directory and `go build -o couchbase-database-plugin ./cmd/couchbase-database-plugin/`. 
To test `go test` will execute a set of basic tests against the docker.io/couchbase/server-sandbox:6.5.0 couchbase database image. To test against different sandbox images, for example 5.5.1, set the `COUCHBASE_VERSION=5.5.1` environment variable. If you want to run the tests against a local couchbase installation or an already running couchbase container, set the environment variable `COUCHBASE_HOST` before executing. **Note** you will need to align the Administrator username, password and bucket_name with the pre-set values in the `couchbase_test.go` file. Set VAULT_ACC to execute all of the tests. A subset of tests can be run using the command `go test -run TestDriver/Init` for example. + +## Installation + +The Vault plugin system is documented on the [Vault documentation site](https://www.vaultproject.io/docs/internals/plugins.html). + +You will need to define a plugin directory using the `plugin_directory` configuration directive, then place the +`vault-plugin-database-couchbase` executable generated above, into the directory. + +Sample commands for registering and starting to use the plugin: + +```bash +$ SHA256=$(shasum -a 256 plugins/couchbase-database-plugin | cut -d' ' -f1) + +$ vault secrets enable database + +$ vault write sys/plugins/catalog/database/couchbase-database-plugin sha256=$SHA256 \ + command=couchbase-database-plugin +``` + +At this stage you are now ready to initialize the plugin to connect to couchbase cluster using unencrypted or encrypted communications. + +Prior to initializing the plugin, ensure that you have created an administration account. Vault will use the user specified here to create/update/revoke database credentials. That user must have the appropriate permissions to perform actions upon other database users. 
+ +### Unencrypted plugin initialization + +```bash +$ vault write database/config/insecure-couchbase plugin_name="couchbase-database-plugin" \ + hosts="localhost" username="Administrator" password="password" \ + bucket_name="travel-sample" \ # only needed for pre-6.5.0 clusters + allowed_roles="insecure-couchbase-admin-role,insecure-couchbase-*-bucket-role,static-account" + +# You should consider rotating the admin password. Note that if you do, the new password will never be made available +# through Vault, so you should create a vault-specific database admin user for this. +$ vault write -force database/rotate-root/insecure-couchbase + + ``` + +Note: If you want to connect the plugin to a couchbase cluster prior to version 6.5.0 you will also have to supply an existing bucket (bucket_name="travel-sample") or the command will fail with the error message **"error verifying connection: error in Connection waiting for cluster: unambiguous timeout"**. + +### Encrypted plugin initialization + +The example here uses the self signed CA certificate that comes with the out of the box couchbase cluster installation and is not suitable for real production use where commercial grade certificates should be obtained. + +```bash +$ BASE64PEM=$(curl -X GET http://Administrator:Admin123@127.0.0.1:8091/pools/default/certificate|base64 -w0) + +$ vault write database/config/secure-couchbase plugin_name="couchbase-database-plugin" \ + hosts="couchbases://localhost" username="Administrator" password="password" \ + tls=true base64pem=${BASE64PEM} \ + bucket_name="travel-sample" \ # only needed for pre-6.5.0 clusters + allowed_roles="secure-couchbase-admin-role,secure-couchbase-*-bucket-role,static-account" + +# You should consider rotating the admin password. Note that if you do, the new password will never be made available +# through Vault, so you should create a vault-specific database admin user for this. 
+$ vault write -force database/rotate-root/secure-couchbase +``` + +### Dynamic Role Creation + +When you create roles, you need to provide a JSON string containing the Couchbase RBAC roles which are documented [here](https://docs.couchbase.com/server/6.5/learn/security/roles.html). From Couchbase 6.5 groups are supported and the creation statement can contain just roles or just groups or a mixture of the two. **Note** to use a group, it must have been created in the database previously. + +```bash +# if a creation_statement is not provided the user account will default to read only admin, '{"roles":[{"role":"ro_admin"}]}' +$ vault write database/roles/insecure-couchbase-admin-role db_name=insecure-couchbase \ + default_ttl="5m" max_ttl="1h" creation_statements='{"roles":[{"role":"admin"}],"groups":["Supervisor"]}' + +$ vault write database/roles/insecure-couchbase-travel-sample-bucket-role db_name=insecure-couchbase \ + default_ttl="5m" max_ttl="1h" creation_statements='{"roles":[{"role":"bucket_full_access","bucket_name":"travel-sample"}]}' +Success! 
Data written to: database/roles/insecure-couchbase-travel-sample-bucket-role +``` + +If you create a role that uses groups on a pre 6.5 couchbase server it will be successful, but when you try to generate credentials +you will receive the error **rpc error: code = Unknown desc = {"errors":{"groups":"Unsupported key"}} ...** + +To retrieve the credentials for the dynamic accounts + +```bash + +$ vault read database/creds/insecure-couchbase-admin-role +Key Value +--- ----- +lease_id database/creds/insecure-couchbase-admin-role/KJ7CTmpFni6U6BCDJ14HcmDm +lease_duration 5m +lease_renewable true +password A1a-yCSH5rAh8QAkCzwu +username v-token-insecure-couchbase-admin-role-yA2hgb0tfewf + +$ vault read database/creds/insecure-couchbase-travel-sample-bucket-role +Key Value +--- ----- +lease_id database/creds/insecure-couchbase-travel-sample-bucket-role/OzHdfkIZdeY9p8kjdWur512j +lease_duration 5m +lease_renewable true +password A1a-0yTIuO4q0dCvphz1 +username v-token-insecure-couchbase-travel-sample-bucket-role-iN5 + +``` + +### Static Role Creation + +In order to use static roles, the user must already exist in the Couchbase security settings. The example below assumes that there is an existing user with the name "vault-edu". If the user does not exist you will receive the following error. + +```bash +* 1 error occurred: + * error setting credentials: rpc error: code = Unknown desc = user not found | {"unique_id":"74f229fd-b3b3-4036-9673-312adae094bb","endpoint":"http://localhost:8091"} +``` + +```bash +$ vault write database/static-roles/static-account db_name=insecure-couchbase \ + username="vault-edu" rotation_period="5m" +Success! 
Data written to: database/static-roles/static-account +```` + +To retrieve the credentials for the vault-edu user + +```bash +$ vault read database/static-creds/static-account +Key Value +--- ----- +last_vault_rotation 2020-06-15T14:32:16.682130141-05:00 +password A1a-09ApRvglZY1Usdjp +rotation_period 5m +ttl 30s +username vault-edu +``` + +## Developing + +You can run `make dev` in the root of the repo to start up a development vault server and automatically register a local build of the plugin. You will need to have a built `vault` binary available in your `$PATH` to do so. diff --git a/vendor/github.com/hashicorp/vault-plugin-database-couchbase/connection_producer.go b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/connection_producer.go new file mode 100644 index 000000000000..01f1a12dbfa8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/connection_producer.go @@ -0,0 +1,184 @@ +package couchbase + +import ( + "context" + "crypto/x509" + "encoding/base64" + "fmt" + "strings" + "sync" + "time" + + "github.com/couchbase/gocb/v2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/mitchellh/mapstructure" +) + +type couchbaseDBConnectionProducer struct { + PublicKey string `json:"public_key"` + PrivateKey string `json:"private_key"` + ProjectID string `json:"project_id"` + Hosts string `json:"hosts"` + Username string `json:"username"` + Password string `json:"password"` + TLS bool `json:"tls"` + InsecureTLS bool `json:"insecure_tls"` + Base64Pem string `json:"base64pem"` + BucketName string `json:"bucket_name"` + + Initialized bool + rawConfig map[string]interface{} + Type string + cluster *gocb.Cluster + sync.Mutex +} + +func (c *couchbaseDBConnectionProducer) secretValues() map[string]interface{} { + return map[string]interface{}{ + c.Password: "[password]", + c.Username: "[username]", + } +} + +func (c *couchbaseDBConnectionProducer) Init(ctx context.Context, 
initConfig map[string]interface{}, verifyConnection bool) (saveConfig map[string]interface{}, err error) { + + c.Lock() + defer c.Unlock() + + c.rawConfig = initConfig + + decoderConfig := &mapstructure.DecoderConfig{ + Result: c, + WeaklyTypedInput: true, + TagName: "json", + } + + decoder, err := mapstructure.NewDecoder(decoderConfig) + if err != nil { + return nil, err + } + + err = decoder.Decode(initConfig) + if err != nil { + return nil, err + } + + switch { + case len(c.Hosts) == 0: + return nil, fmt.Errorf("hosts cannot be empty") + case len(c.Username) == 0: + return nil, fmt.Errorf("username cannot be empty") + case len(c.Password) == 0: + return nil, fmt.Errorf("password cannot be empty") + } + + if c.TLS { + if len(c.Base64Pem) == 0 { + return nil, fmt.Errorf("base64pem cannot be empty") + } + + if !strings.HasPrefix(c.Hosts, "couchbases://") { + return nil, fmt.Errorf("hosts list must start with couchbases:// for TLS connection") + } + } + + c.Initialized = true + + if verifyConnection { + if _, err := c.Connection(ctx); err != nil { + c.close() + return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) + } + } + + return initConfig, nil +} + +func (c *couchbaseDBConnectionProducer) Initialize(ctx context.Context, config map[string]interface{}, verifyConnection bool) error { + _, err := c.Init(ctx, config, verifyConnection) + return err +} +func (c *couchbaseDBConnectionProducer) Connection(_ context.Context) (interface{}, error) { + // This is intentionally not grabbing the lock since the calling functions (e.g. CreateUser) + // are claiming it. (The locking patterns could be refactored to be more consistent/clear.) 
+ + if !c.Initialized { + return nil, connutil.ErrNotInitialized + } + + if c.cluster != nil { + return c.cluster, nil + } + var err error + var sec gocb.SecurityConfig + var pem []byte + + if c.TLS { + pem, err = base64.StdEncoding.DecodeString(c.Base64Pem) + if err != nil { + return nil, errwrap.Wrapf("error decoding Base64Pem: {{err}}", err) + } + rootCAs := x509.NewCertPool() + ok := rootCAs.AppendCertsFromPEM([]byte(pem)) + if !ok { + return nil, fmt.Errorf("failed to parse root certificate") + } + sec = gocb.SecurityConfig{ + TLSRootCAs: rootCAs, + TLSSkipVerify: c.InsecureTLS, + } + } + + c.cluster, err = gocb.Connect( + c.Hosts, + gocb.ClusterOptions{ + Username: c.Username, + Password: c.Password, + SecurityConfig: sec, + }) + if err != nil { + return nil, errwrap.Wrapf("error in Connection: {{err}}", err) + } + + // For databases 6.0 and earlier, we will need to open a `Bucket instance before connecting to any other + // HTTP services such as UserManager. + + if c.BucketName != "" { + bucket := c.cluster.Bucket(c.BucketName) + // We wait until the bucket is definitely connected and setup. 
+ err = bucket.WaitUntilReady(5*time.Second, nil) + if err != nil { + return nil, errwrap.Wrapf("error in Connection waiting for bucket: {{err}}", err) + } + } else { + err = c.cluster.WaitUntilReady(5*time.Second, nil) + + if err != nil { + return nil, errwrap.Wrapf("error in Connection waiting for cluster: {{err}}", err) + } + } + + return c.cluster, nil +} + +// close terminates the database connection without locking +func (c *couchbaseDBConnectionProducer) close() error { + + if c.cluster != nil { + if err := c.cluster.Close(&gocb.ClusterCloseOptions{}); err != nil { + return err + } + } + + c.cluster = nil + return nil +} + +// Close terminates the database connection with locking +func (c *couchbaseDBConnectionProducer) Close() error { + c.Lock() + defer c.Unlock() + + return c.close() +} diff --git a/vendor/github.com/hashicorp/vault-plugin-database-couchbase/couchbase.go b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/couchbase.go new file mode 100644 index 000000000000..2ad84b4ae7c5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/couchbase.go @@ -0,0 +1,325 @@ +package couchbase + +import ( + "context" + "encoding/json" + "errors" + "time" + + "github.com/couchbase/gocb/v2" + "github.com/hashicorp/errwrap" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/database/helper/credsutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" +) + +const ( + couchbaseTypeName = "couchbase" + defaultCouchbaseUserRole = `{"Roles": [{"role":"ro_admin"}]}` +) + +var ( + _ dbplugin.Database = &CouchbaseDB{} +) + +// Type that combines the custom plugins Couchbase database connection configuration options and the Vault CredentialsProducer +// used for generating user information for the Couchbase database. 
+type CouchbaseDB struct { + *couchbaseDBConnectionProducer + credsutil.CredentialsProducer +} + +// Type that combines the Couchbase Roles and Groups representing specific account permissions. Used to pass roles and or +// groups between the Vault server and the custom plugin in the dbplugin.Statements +type RolesAndGroups struct { + Roles []gocb.Role `json:"roles"` + Groups []string `json:"groups"` +} + +// New implements builtinplugins.BuiltinFactory +func New() (interface{}, error) { + db := new() + // Wrap the plugin with middleware to sanitize errors + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues) + return dbType, nil +} + +func new() *CouchbaseDB { + connProducer := &couchbaseDBConnectionProducer{} + connProducer.Type = couchbaseTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: 50, + RoleNameLen: 50, + UsernameLen: 50, + Separator: "-", + } + + db := &CouchbaseDB{ + couchbaseDBConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return db +} + +// Run instantiates a CouchbaseDB object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := New() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database), api.VaultPluginTLSProvider(apiTLSConfig)) + + return nil +} + +func (c *CouchbaseDB) Type() (string, error) { + return couchbaseTypeName, nil +} + +func computeTimeout(ctx context.Context) (timeout time.Duration) { + deadline, ok := ctx.Deadline() + if ok { + return time.Until(deadline) + } + return 5 * time.Second +} +func (c *CouchbaseDB) getConnection(ctx context.Context) (*gocb.Cluster, error) { + db, err := c.Connection(ctx) + if err != nil { + return nil, err + } + return db.(*gocb.Cluster), nil +} + +// SetCredentials uses provided information to set/create a user in the +// database. 
Unlike CreateUser, this method requires a username be provided and +// uses the name given, instead of generating a name. This is used for creating +// and setting the password of static accounts, as well as rolling back +// passwords in the database in the event an updated database fails to save in +// Vault's storage. +func (c *CouchbaseDB) SetCredentials(ctx context.Context, _ dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { + username = staticUser.Username + password = staticUser.Password + if username == "" || password == "" { + return "", "", errors.New("must provide both username and password") + } + + // Grab the lock + c.Lock() + defer c.Unlock() + + // Get the connection + db, err := c.getConnection(ctx) + if err != nil { + return "", "", err + } + + // Close the database connection to ensure no new connections come in + defer func() { + if err := c.close(); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + logger.Error("defer close failed", "error", err) + } + }() + + // Get the UserManager + + mgr := db.Users() + + // Get the User and error out if it does not exist. 
+ + userOpts, err := mgr.GetUser(username, nil) + if err != nil { + return "", "", err + } + + user := gocb.User{ + Username: username, + Password: password, + Roles: userOpts.Roles, + Groups: userOpts.Groups, + DisplayName: userOpts.DisplayName, + } + + err = mgr.UpsertUser(user, + &gocb.UpsertUserOptions{ + Timeout: computeTimeout(ctx), + DomainName: string(userOpts.Domain), + }) + + if err != nil { + return "", "", err + } + + return username, password, nil +} + +func (c *CouchbaseDB) CreateUser(ctx context.Context, statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, _ time.Time) (username string, password string, err error) { + // Grab the lock + c.Lock() + defer c.Unlock() + + statements = dbutil.StatementCompatibilityHelper(statements) + + if len(statements.Creation) == 0 { + statements.Creation = append(statements.Creation, defaultCouchbaseUserRole) + } + + jsonRoleAndGroupData := []byte(statements.Creation[0]) + + var rag RolesAndGroups + + err = json.Unmarshal(jsonRoleAndGroupData, &rag) + if err != nil { + return "", "", errwrap.Wrapf("error unmarshaling roles and groups creation statement JSON: {{err}}", err) + } + + username, err = c.GenerateUsername(usernameConfig) + if err != nil { + return "", "", err + } + + password, err = c.GeneratePassword() + if err != nil { + return "", "", err + } + + // Get the connection + db, err := c.getConnection(ctx) + if err != nil { + return "", "", err + } + + // Close the database connection to ensure no new connections come in + defer func() { + if err := c.close(); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + logger.Error("defer close failed", "error", err) + } + }() + + // Get the UserManager + + mgr := db.Users() + + user := gocb.User{ + Username: username, + DisplayName: usernameConfig.DisplayName, + Password: password, + Roles: rag.Roles, + Groups: rag.Groups, + } + + err = mgr.UpsertUser(user, + &gocb.UpsertUserOptions{ + Timeout: computeTimeout(ctx), + DomainName: "local", 
+ }) + if err != nil { + return "", "", err + } + + return username, password, nil +} + +// RenewUser is not supported by Couchbase, so this is a no-op. +func (p *CouchbaseDB) RenewUser(ctx context.Context, statements dbplugin.Statements, username string, expiration time.Time) error { + // NOOP + + return nil +} + +func (c *CouchbaseDB) RevokeUser(ctx context.Context, statements dbplugin.Statements, username string) error { + // Grab the lock + c.Lock() + defer c.Unlock() + + db, err := c.getConnection(ctx) + if err != nil { + return err + } + + // Close the database connection to ensure no new connections come in + defer func() { + if err := c.close(); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + logger.Error("defer close failed", "error", err) + } + }() + + // Get the UserManager + mgr := db.Users() + + err = mgr.DropUser(username, nil) + + if err != nil { + return err + } + + return nil +} + +func (c *CouchbaseDB) RotateRootCredentials(ctx context.Context, _ []string) (map[string]interface{}, error) { + c.Lock() + defer c.Unlock() + + if len(c.Username) == 0 || len(c.Password) == 0 { + return nil, errors.New("username and password are required to rotate") + } + + password, err := c.GeneratePassword() + if err != nil { + return nil, err + } + + db, err := c.getConnection(ctx) + if err != nil { + return nil, err + } + + // Close the database connection to ensure no new connections come in + defer func() { + if err := c.close(); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + logger.Error("defer close failed", "error", err) + } + }() + + // Get the UserManager + + mgr := db.Users() + + // Get the User + + userOpts, err := mgr.GetUser(c.Username, nil) + if err != nil { + return nil, err + } + + user := gocb.User{ + Username: c.Username, + Password: password, + Roles: userOpts.Roles, + Groups: userOpts.Groups, + DisplayName: userOpts.DisplayName, + } + + err = mgr.UpsertUser(user, + &gocb.UpsertUserOptions{ + Timeout: computeTimeout(ctx), 
+ DomainName: string(userOpts.Domain), + }) + + if err != nil { + return nil, err + } + + c.rawConfig["password"] = password + + return c.rawConfig, nil +} diff --git a/vendor/github.com/hashicorp/vault-plugin-database-couchbase/go.mod b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/go.mod new file mode 100644 index 000000000000..a80a536a3c89 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/go.mod @@ -0,0 +1,27 @@ +module github.com/hashicorp/vault-plugin-database-couchbase + +go 1.14 + +require ( + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/Microsoft/go-winio v0.4.14 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible + github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe // indirect + github.com/couchbase/gocb/v2 v2.1.4 + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect + github.com/hashicorp/errwrap v1.0.0 + github.com/hashicorp/go-hclog v0.14.1 + github.com/hashicorp/go-version v1.2.1 + github.com/hashicorp/vault/api v1.0.4 + github.com/hashicorp/vault/sdk v0.1.13 + github.com/lib/pq v1.8.0 // indirect + github.com/mitchellh/mapstructure v1.3.3 + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opencontainers/runc v0.1.1 // indirect + github.com/ory/dockertest v3.3.5+incompatible + github.com/sirupsen/logrus v1.6.0 // indirect + gotest.tools v2.2.0+incompatible // indirect +) diff --git a/vendor/github.com/hashicorp/vault-plugin-database-couchbase/go.sum b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/go.sum new file mode 100644 index 000000000000..a2b50266400a --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/go.sum @@ -0,0 +1,226 @@ +bazil.org/fuse 
v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe h1:PEmIrUvwG9Yyv+0WKZqjXfSFDeZjs/q15g0m08BYS9k= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/couchbase/gocb/v2 v2.1.4 h1:HRuVhqZpVNIck3FwzTxWh5TnmGXeTmSfjhxkjeradLg= +github.com/couchbase/gocb/v2 
v2.1.4/go.mod h1:lESKM6wCEajrFVSZUewYuRzNtuNtnRey5wOfcZZsH90= +github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8= +github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod 
h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
+github.com/hashicorp/vault/api v1.0.4 h1:j08Or/wryXT4AcHj1oCbMd7IijXcKzYUGw59LGu9onU= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13 h1:mOEPeOhT7jl0J4AMl1E705+BcmeRs1VmKNb9F0sMLy8= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 
h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 
h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.4.1/go.mod 
h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/vault-plugin-database-couchbase/httputils.go b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/httputils.go new file mode 100644 index 000000000000..97527c4ea5cb --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-couchbase/httputils.go @@ -0,0 +1,148 @@ +package couchbase + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/cenkalti/backoff" + "github.com/hashicorp/go-version" +) + +func CheckForOldCouchbaseVersion(hostname, username, password string) (is_old bool, err error) { + + //[TODO] handle list of hostnames + + resp, err := http.Get(fmt.Sprintf("http://%s:%s@%s:8091/pools", username, password, hostname)) + if err != nil { + return false, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false, err + } + + type Pools struct { + ImplementationVersion string `json:"implementationVersion"` + } + data := Pools{} + err = json.Unmarshal(body, &data) + if err != nil { + return false, err + } + v, err := version.NewVersion(data.ImplementationVersion) + + v650, err := version.NewVersion("6.5.0-0000") + if err != nil { + return false, err + } + + if v.LessThan(v650) { + return true, nil + } + return false, nil + +} + +func getRootCAfromCouchbase(url string) (Base64pemCA string, err error) { + resp, err := http.Get(url) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(body), nil +} + +func createUser(hostname string, port int, adminuser, adminpassword, username, password, rbacName, roles string) (err error) { + v := url.Values{} + + 
v.Set("password", password) + v.Add("roles", roles) + v.Add("name", rbacName) + + req, err := http.NewRequest(http.MethodPut, + fmt.Sprintf("http://%s:%s@%s:%d/settings/rbac/users/local/%s", + adminuser, adminpassword, hostname, port, username), + strings.NewReader(v.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.Status != "200 OK" { + return fmt.Errorf("createUser returned %s", resp.Status) + } + return nil +} + +func createGroup(hostname string, port int, adminuser, adminpassword, group, roles string) (err error) { + v := url.Values{} + + v.Set("roles", roles) + + req, err := http.NewRequest(http.MethodPut, + fmt.Sprintf("http://%s:%s@%s:%d/settings/rbac/groups/%s", + adminuser, adminpassword, hostname, port, group), + strings.NewReader(v.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.Status != "200 OK" { + return fmt.Errorf("createGroup returned %s", resp.Status) + } + return nil +} + +func waitForBucketInstalled(address, username, password, bucket string) (bucketFound, bucketInstalled bool, err error) { + resp, err := http.Get(fmt.Sprintf("http://%s:%s@%s:8091/sampleBuckets", username, password, address)) + if err != nil { + return false, false, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false, false, err + } + + type installed []struct { + Name string `json:"name"` + Installed bool `json:"installed"` + QuotaNeeded int64 `json:"quotaNeeded"` + } + + var iresult installed + + err = json.Unmarshal(body, &iresult) + if err != nil { + err := backoff.PermanentError{ + Err: fmt.Errorf("error unmarshaling JSON %s", err), + } + return false, false, &err + } + + for _, s := range iresult { + if 
s.Name == bucket { + bucketFound = true + if s.Installed == true { + bucketInstalled = true + } + } + + } + return bucketFound, bucketInstalled, nil +} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md index 949b77e304e4..09a4a35c9bb7 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md @@ -26,6 +26,8 @@ The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de). We thank all the authors who provided code to this library: * Felix Kollmann +* Nicolas Perraut +* @dirty49374 ## License diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go index ef18d8f97876..57f530ae83f6 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go @@ -4,7 +4,6 @@ package sequences import ( "syscall" - "unsafe" ) var ( @@ -27,7 +26,7 @@ func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING } - ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode)) + ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode)) if ret == 0 { return err } diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go new file mode 100644 index 000000000000..df61a6f2f6fe --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go @@ -0,0 +1,11 @@ +// +build linux darwin + +package sequences + +import ( + "fmt" +) + +func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error { + return fmt.Errorf("windows only package") +} diff 
--git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml index 8396f5d9d475..3498c53dcdd6 100644 --- a/vendor/github.com/lib/pq/.travis.yml +++ b/vendor/github.com/lib/pq/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - 1.11.x - - 1.12.x + - 1.13.x + - 1.14.x - master sudo: true @@ -27,7 +27,7 @@ before_install: - ./.travis.sh client_configure - go get golang.org/x/tools/cmd/goimports - go get golang.org/x/lint/golint - - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2019.2.1 + - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2020.1.3 before_script: - createdb pqgotest @@ -38,7 +38,7 @@ script: - > goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' - go vet ./... - - staticcheck -go 1.11 ./... + - staticcheck -go 1.13 ./... - golint ./... - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md deleted file mode 100644 index 84c937f1561c..000000000000 --- a/vendor/github.com/lib/pq/CONTRIBUTING.md +++ /dev/null @@ -1,29 +0,0 @@ -## Contributing to pq - -`pq` has a backlog of pull requests, but contributions are still very -much welcome. You can help with patch review, submitting bug reports, -or adding new functionality. There is no formal style guide, but -please conform to the style of existing code and general Go formatting -conventions when submitting patches. - -### Patch review - -Help review existing open pull requests by commenting on the code or -proposed functionality. - -### Bug reports - -We appreciate any bug reports, but especially ones with self-contained -(doesn't depend on code outside of pq), minimal (can't be simplified -further) test cases. 
It's especially helpful if you can submit a pull -request with just the failing test case (you'll probably want to -pattern it after the tests in -[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go). - -### New functionality - -There are a number of pending patches for new functionality, so -additional feature patches will take a while to merge. Still, patches -are generally reviewed based on usefulness and complexity in addition -to time-in-queue, so if you have a knockout idea, take a shot. Feel -free to open an issue discussion your proposed patch beforehand. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md index 385fe73508ea..c972a86a5795 100644 --- a/vendor/github.com/lib/pq/README.md +++ b/vendor/github.com/lib/pq/README.md @@ -1,21 +1,11 @@ # pq - A pure Go postgres driver for Go's database/sql package -[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq) -[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq) +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) ## Install go get github.com/lib/pq -## Docs - -For detailed documentation and basic usage examples, please see the package -documentation at . - -## Tests - -`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. - ## Features * SSL @@ -29,67 +19,12 @@ documentation at . * Unix socket support * Notifications: `LISTEN`/`NOTIFY` * pgpass support +* GSS (Kerberos) auth -## Future / Things you can help with - -* Better COPY FROM / COPY TO (see discussion in #181) +## Tests -## Thank you (alphabetical) +`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. -Some of these contributors are from the original library `bmizerany/pq.go` whose -code still exists in here. 
+## Status -* Andy Balholm (andybalholm) -* Ben Berkert (benburkert) -* Benjamin Heatwole (bheatwole) -* Bill Mill (llimllib) -* Bjørn Madsen (aeons) -* Blake Gentry (bgentry) -* Brad Fitzpatrick (bradfitz) -* Charlie Melbye (cmelbye) -* Chris Bandy (cbandy) -* Chris Gilling (cgilling) -* Chris Walsh (cwds) -* Dan Sosedoff (sosedoff) -* Daniel Farina (fdr) -* Eric Chlebek (echlebek) -* Eric Garrido (minusnine) -* Eric Urban (hydrogen18) -* Everyone at The Go Team -* Evan Shaw (edsrzf) -* Ewan Chou (coocood) -* Fazal Majid (fazalmajid) -* Federico Romero (federomero) -* Fumin (fumin) -* Gary Burd (garyburd) -* Heroku (heroku) -* James Pozdena (jpoz) -* Jason McVetta (jmcvetta) -* Jeremy Jay (pbnjay) -* Joakim Sernbrant (serbaut) -* John Gallagher (jgallagher) -* Jonathan Rudenberg (titanous) -* Joël Stemmer (jstemmer) -* Kamil Kisiel (kisielk) -* Kelly Dunn (kellydunn) -* Keith Rarick (kr) -* Kir Shatrov (kirs) -* Lann Martin (lann) -* Maciek Sakrejda (uhoh-itsmaciek) -* Marc Brinkmann (mbr) -* Marko Tiikkaja (johto) -* Matt Newberry (MattNewberry) -* Matt Robenolt (mattrobenolt) -* Martin Olsen (martinolsen) -* Mike Lewis (mikelikespie) -* Nicolas Patry (Narsil) -* Oliver Tonnhofer (olt) -* Patrick Hayes (phayes) -* Paul Hammond (paulhammond) -* Ryan Smith (ryandotsmith) -* Samuel Stauffer (samuel) -* Timothée Peignier (cyberdelia) -* Travis Cline (tmc) -* TruongSinh Tran-Nguyen (truongsinh) -* Yaismel Miranda (ympons) -* notedit (notedit) +This package is effectively in maintenance mode and is not actively developed. Small patches and features are only rarely reviewed and merged. We recommend using [pgx](https://github.com/jackc/pgx) which is actively maintained. 
diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go index 55152b1242ce..f313c1498613 100644 --- a/vendor/github.com/lib/pq/conn.go +++ b/vendor/github.com/lib/pq/conn.go @@ -149,6 +149,15 @@ type conn struct { // If true this connection is in the middle of a COPY inCopy bool + + // If not nil, notices will be synchronously sent here + noticeHandler func(*Error) + + // If not nil, notifications will be synchronously sent here + notificationHandler func(*Notification) + + // GSSAPI context + gss GSS } // Handle driver-side settings in parsed connection string. @@ -329,10 +338,6 @@ func (c *Connector) open(ctx context.Context) (cn *conn, err error) { func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { network, address := network(o) - // SSL is not necessary or supported over UNIX domain sockets - if network == "unix" { - o["sslmode"] = "disable" - } // Zero or not specified means wait indefinitely. if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { @@ -971,7 +976,13 @@ func (cn *conn) recv() (t byte, r *readBuf) { case 'E': panic(parseError(r)) case 'N': - // ignore + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } default: return } @@ -988,8 +999,14 @@ func (cn *conn) recv1Buf(r *readBuf) byte { } switch t { - case 'A', 'N': - // ignore + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } case 'S': cn.processParameterStatus(r) default: @@ -1057,7 +1074,10 @@ func isDriverSetting(key string) bool { return true case "binary_parameters": return true - + case "krbsrvname": + return true + case "krbspn": + return true default: return false } @@ -1137,6 +1157,59 @@ func (cn *conn) auth(r *readBuf, o values) { if r.int32() != 0 { errorf("unexpected authentication response: %q", t) } + case 7: // GSSAPI, startup + 
if newGss == nil { + errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") + } + cli, err := newGss() + if err != nil { + errorf("kerberos error: %s", err.Error()) + } + + var token []byte + + if spn, ok := o["krbspn"]; ok { + // Use the supplied SPN if provided.. + token, err = cli.GetInitTokenFromSpn(spn) + } else { + // Allow the kerberos service name to be overridden + service := "postgres" + if val, ok := o["krbsrvname"]; ok { + service = val + } + + token, err = cli.GetInitToken(o["host"], service) + } + + if err != nil { + errorf("failed to get Kerberos ticket: %q", err) + } + + w := cn.writeBuf('p') + w.bytes(token) + cn.send(w) + + // Store for GSSAPI continue message + cn.gss = cli + + case 8: // GSSAPI continue + + if cn.gss == nil { + errorf("GSSAPI protocol error") + } + + b := []byte(*r) + + done, tokOut, err := cn.gss.Continue(b) + if err == nil && !done { + w := cn.writeBuf('p') + w.bytes(tokOut) + cn.send(w) + } + + // Errors fall through and read the more detailed message + // from the server.. 
+ case 10: sc := scram.NewClient(sha256.New, o["user"], o["password"]) sc.Step(nil) diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go index 0fdd06a617c3..09e2ea4648eb 100644 --- a/vendor/github.com/lib/pq/conn_go18.go +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -79,7 +79,7 @@ func (cn *conn) Ping(ctx context.Context) error { if finish := cn.watchCancel(ctx); finish != nil { defer finish() } - rows, err := cn.simpleQuery("SELECT 'lib/pq ping test';") + rows, err := cn.simpleQuery(";") if err != nil { return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger } diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go index 2f8ced6737d1..d7d47261569a 100644 --- a/vendor/github.com/lib/pq/connector.go +++ b/vendor/github.com/lib/pq/connector.go @@ -27,7 +27,7 @@ func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { return c.open(ctx) } -// Driver returnst the underlying driver of this Connector. +// Driver returns the underlying driver of this Connector. 
func (c *Connector) Driver() driver.Driver { return &Driver{} } @@ -106,5 +106,10 @@ func NewConnector(dsn string) (*Connector, error) { o["user"] = u } + // SSL is not necessary or supported over UNIX domain sockets + if network, _ := network(o); network == "unix" { + o["sslmode"] = "disable" + } + return &Connector{opts: o, dialer: defaultDialer{}}, nil } diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go index 345c2398f6cd..38d5bb693f0b 100644 --- a/vendor/github.com/lib/pq/copy.go +++ b/vendor/github.com/lib/pq/copy.go @@ -49,6 +49,7 @@ type copyin struct { buffer []byte rowData chan []byte done chan bool + driver.Result closed bool @@ -151,8 +152,12 @@ func (ci *copyin) resploop() { switch t { case 'C': // complete + res, _ := ci.cn.parseComplete(r.string()) + ci.setResult(res) case 'N': - // NoticeResponse + if n := ci.cn.noticeHandler; n != nil { + n(parseError(&r)) + } case 'Z': ci.cn.processReadyForQuery(&r) ci.done <- true @@ -199,6 +204,22 @@ func (ci *copyin) setError(err error) { ci.Unlock() } +func (ci *copyin) setResult(result driver.Result) { + ci.Lock() + ci.Result = result + ci.Unlock() +} + +func (ci *copyin) getResult() driver.Result { + ci.Lock() + result := ci.Result + if result == nil { + return driver.RowsAffected(0) + } + ci.Unlock() + return result +} + func (ci *copyin) NumInput() int { return -1 } @@ -229,7 +250,11 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { } if len(v) == 0 { - return nil, ci.Close() + if err := ci.Close(); err != nil { + return driver.RowsAffected(0), err + } + + return ci.getResult(), nil } numValues := len(v) diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go index 2a60054e2e00..b57184801ba9 100644 --- a/vendor/github.com/lib/pq/doc.go +++ b/vendor/github.com/lib/pq/doc.go @@ -241,5 +241,28 @@ bytes by the PostgreSQL server. 
You can find a complete, working example of Listener usage at https://godoc.org/github.com/lib/pq/example/listen. + +Kerberos Support + + +If you need support for Kerberos authentication, add the following to your main +package: + + import "github.com/lib/pq/auth/kerberos" + + func init() { + pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) + } + +This package is in a separate module so that users who don't need Kerberos +don't have to download unnecessary dependencies. + +When imported, additional connection string parameters are supported: + + * krbsrvname - GSS (Kerberos) service name when constructing the + SPN (default is `postgres`). This will be combined with the host + to form the full SPN: `krbsrvname/host`. + * krbspn - GSS (Kerberos) SPN. This takes priority over + `krbsrvname` if present. */ package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go index a6902fae615c..c4dafe2705a3 100644 --- a/vendor/github.com/lib/pq/encode.go +++ b/vendor/github.com/lib/pq/encode.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math" + "regexp" "strconv" "strings" "sync" @@ -16,6 +17,8 @@ import ( "github.com/lib/pq/oid" ) +var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) + func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { switch v := x.(type) { case []byte: @@ -202,10 +205,27 @@ func mustParse(f string, typ oid.Oid, s []byte) time.Time { str[len(str)-3] == ':' { f += ":00" } + // Special case for 24:00 time. + // Unfortunately, golang does not parse 24:00 as a proper time. + // In this case, we want to try "round to the next day", to differentiate. + // As such, we find if the 24:00 time matches at the beginning; if so, + // we default it back to 00:00 but add a day later. 
+ var is2400Time bool + switch typ { + case oid.T_timetz, oid.T_time: + if matches := time2400Regex.FindStringSubmatch(str); matches != nil { + // Concatenate timezone information at the back. + str = "00:00:00" + str[len(matches[1]):] + is2400Time = true + } + } t, err := time.Parse(f, str) if err != nil { errorf("decode: %s", err) } + if is2400Time { + t = t.Add(24 * time.Hour) + } return t } @@ -487,7 +507,7 @@ func FormatTimestamp(t time.Time) []byte { b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) _, offset := t.Zone() - offset = offset % 60 + offset %= 60 if offset != 0 { // RFC3339Nano already printed the minus sign if offset < 0 { diff --git a/vendor/github.com/lib/pq/go.mod b/vendor/github.com/lib/pq/go.mod index edf0b343fd17..b5a5639ab671 100644 --- a/vendor/github.com/lib/pq/go.mod +++ b/vendor/github.com/lib/pq/go.mod @@ -1 +1,3 @@ module github.com/lib/pq + +go 1.13 diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go new file mode 100644 index 000000000000..408ec01f9779 --- /dev/null +++ b/vendor/github.com/lib/pq/krb.go @@ -0,0 +1,27 @@ +package pq + +// NewGSSFunc creates a GSS authentication provider, for use with +// RegisterGSSProvider. +type NewGSSFunc func() (GSS, error) + +var newGss NewGSSFunc + +// RegisterGSSProvider registers a GSS authentication provider. For example, if +// you need to use Kerberos to authenticate with your server, add this to your +// main package: +// +// import "github.com/lib/pq/auth/kerberos" +// +// func init() { +// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) +// } +func RegisterGSSProvider(newGssArg NewGSSFunc) { + newGss = newGssArg +} + +// GSS provides GSSAPI authentication (e.g., Kerberos). 
+type GSS interface { + GetInitToken(host string, service string) ([]byte, error) + GetInitTokenFromSpn(spn string) ([]byte, error) + Continue(inToken []byte) (done bool, outToken []byte, err error) +} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go new file mode 100644 index 000000000000..01dd8c723ddb --- /dev/null +++ b/vendor/github.com/lib/pq/notice.go @@ -0,0 +1,71 @@ +// +build go1.10 + +package pq + +import ( + "context" + "database/sql/driver" +) + +// NoticeHandler returns the notice handler on the given connection, if any. A +// runtime panic occurs if c is not a pq connection. This is rarely used +// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. +func NoticeHandler(c driver.Conn) func(*Error) { + return c.(*conn).noticeHandler +} + +// SetNoticeHandler sets the given notice handler on the given connection. A +// runtime panic occurs if c is not a pq connection. A nil handler may be used +// to unset it. This is rarely used directly, use ConnectorNoticeHandler and +// ConnectorWithNoticeHandler instead. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNoticeHandler(c driver.Conn, handler func(*Error)) { + c.(*conn).noticeHandler = handler +} + +// NoticeHandlerConnector wraps a regular connector and sets a notice handler +// on it. +type NoticeHandlerConnector struct { + driver.Connector + noticeHandler func(*Error) +} + +// Connect calls the underlying connector's connect method and then sets the +// notice handler. +func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNoticeHandler(c, n.noticeHandler) + } + return c, err +} + +// ConnectorNoticeHandler returns the currently set notice handler, if any. If +// the given connector is not a result of ConnectorWithNoticeHandler, nil is +// returned. 
+func ConnectorNoticeHandler(c driver.Connector) func(*Error) { + if c, ok := c.(*NoticeHandlerConnector); ok { + return c.noticeHandler + } + return nil +} + +// ConnectorWithNoticeHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notice +// handler. A nil notice handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { + if c, ok := c.(*NoticeHandlerConnector); ok { + c.noticeHandler = handler + return c + } + return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go index 850bb9040c30..5c421fdb8b56 100644 --- a/vendor/github.com/lib/pq/notify.go +++ b/vendor/github.com/lib/pq/notify.go @@ -4,6 +4,8 @@ package pq // This module contains support for Postgres LISTEN/NOTIFY. import ( + "context" + "database/sql/driver" "errors" "fmt" "sync" @@ -29,6 +31,61 @@ func recvNotification(r *readBuf) *Notification { return &Notification{bePid, channel, extra} } +// SetNotificationHandler sets the given notification handler on the given +// connection. A runtime panic occurs if c is not a pq connection. A nil handler +// may be used to unset it. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. 
+func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { + c.(*conn).notificationHandler = handler +} + +// NotificationHandlerConnector wraps a regular connector and sets a notification handler +// on it. +type NotificationHandlerConnector struct { + driver.Connector + notificationHandler func(*Notification) +} + +// Connect calls the underlying connector's connect method and then sets the +// notification handler. +func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNotificationHandler(c, n.notificationHandler) + } + return c, err +} + +// ConnectorNotificationHandler returns the currently set notification handler, if any. If +// the given connector is not a result of ConnectorWithNotificationHandler, nil is +// returned. +func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { + if c, ok := c.(*NotificationHandlerConnector); ok { + return c.notificationHandler + } + return nil +} + +// ConnectorWithNotificationHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notification +// handler. A nil notification handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. 
+func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { + if c, ok := c.(*NotificationHandlerConnector); ok { + c.notificationHandler = handler + return c + } + return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} +} + const ( connStateIdle int32 = iota connStateExpectResponse @@ -174,8 +231,12 @@ func (l *ListenerConn) listenerConnLoop() (err error) { } l.replyChan <- message{t, nil} - case 'N', 'S': + case 'S': // ignore + case 'N': + if n := l.cn.noticeHandler; n != nil { + n(parseError(r)) + } default: return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) } diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go index 484f378a7604..477216b6008a 100644 --- a/vendor/github.com/lib/pq/scram/scram.go +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -94,7 +94,7 @@ func (c *Client) Out() []byte { return c.out.Bytes() } -// Err returns the error that ocurred, or nil if there were no errors. +// Err returns the error that occurred, or nil if there were no errors. func (c *Client) Err() error { return c.err } diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go index bf982524f937..a51019205824 100644 --- a/vendor/github.com/lib/pq/user_posix.go +++ b/vendor/github.com/lib/pq/user_posix.go @@ -1,6 +1,6 @@ // Package pq is a pure Go Postgres driver for the database/sql package. 
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris rumprun package pq diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 3378f7e66edf..20eea2b7ade1 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,7 @@ +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + ## 1.3.2 * Decode into interface type with a struct value is supported [GH-187] diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index b384d9d928f9..f41bcc58fbb1 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -906,11 +906,22 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re mType := reflect.MapOf(vKeyType, vElemType) vMap := reflect.MakeMap(mType) - err := d.decode(keyName, x.Interface(), vMap) + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) if err != nil { return err } + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. 
+ vMap = reflect.Indirect(addrVal) + if squash { for _, k := range vMap.MapKeys() { valMap.SetMapIndex(k, vMap.MapIndex(k)) @@ -1154,13 +1165,23 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Not the most efficient way to do this but we can optimize later if // we want to. To convert from struct to struct we go to map first // as an intermediary. - m := make(map[string]interface{}) - mval := reflect.Indirect(reflect.ValueOf(&m)) - if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { return err } - result := d.decodeStructFromMap(name, mval, val) + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) return result default: diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml new file mode 100644 index 000000000000..65dc2850377d --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. 
+ check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml index 848938a6d4ed..5e20aa4140c3 100644 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -4,21 +4,13 @@ git: depth: 1 env: - GO111MODULE=on - - GO111MODULE=off -go: [ 1.11.x, 1.12.x ] -os: [ linux, osx ] -matrix: - exclude: - - go: 1.12.x - env: GO111MODULE=off - - go: 1.11.x - os: osx +go: [1.13.x, 1.14.x] +os: [linux, osx] install: - ./travis/install.sh - - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi - - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi script: - ./travis/cross_build.sh + - ./travis/lint.sh - export GOMAXPROCS=4 - export GORACE=halt_on_error=1 - go test -race -v ./... 
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 51a7ab0cab91..584026d67caa 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,9 +1,32 @@ +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + # 1.4.2 * Fixes build break for plan9, nacl, solaris # 1.4.1 This new release introduces: * Enhance TextFormatter to not print caller information when they are empty (#944) - * Remove dependency on golang.org/x/crypto (#932, #943) + * Remove dependency on golang.org/x/crypto (#932, #943) Fixes: * Fix Entry.WithContext method to return a copy of the initial entry (#941) @@ -11,7 +34,7 @@ Fixes: # 1.4.0 This new release introduces: * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). - * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911) + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). 
Fixes: diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index a4796eb07d46..5796706dbfa2 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,8 +1,28 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. 
+ +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + **Seeing weird case-sensitive problems?** It's in the past been possible to import Logrus as both upper- and lower-case. Due to the Go package environment, this caused issues in the community and we needed a standard. Some environments @@ -15,11 +35,6 @@ comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). For an in-depth explanation of the casing issue, see [this comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). -**Are you interested in assisting in maintaining Logrus?** Currently I have a -lot of obligations, and I am unable to provide Logrus with the maintainership it -needs. If you'd like to help, please reach out to me at `simon at author's -username dot com`. - Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -187,7 +202,7 @@ func main() { log.Out = os.Stdout // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) // if err == nil { // log.Out = file // } else { @@ -272,7 +287,7 @@ func init() { ``` Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). -A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) #### Level logging @@ -354,6 +369,7 @@ The built-in logging formatters are: [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). * When colors are enabled, levels are truncated to 4 characters by default. 
To disable truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). @@ -364,8 +380,10 @@ Third party logging formatters: * [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. * [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -430,14 +448,14 @@ entries. It should not be a feature of the application-level logger. 
| Tool | Description | | ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| |[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | #### Testing Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go @@ -465,7 +483,7 @@ func TestSomething(t*testing.T){ Logrus can register one or more functions that will be called when any `fatal` level message is logged. The registered handlers will be executed before -logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. 
``` @@ -490,6 +508,6 @@ Situation when locking is not needed includes: 1) logger.Out is protected by locks. - 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml index 96c2ce15f842..df9d65c3a5bb 100644 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -1,14 +1,14 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 63e25583cb03..f6e062a3466c 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -85,10 +85,15 @@ func NewEntry(logger *Logger) *Entry { } } +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + // Returns the string representation from the reader and ultimately the // formatter. 
func (entry *Entry) String() (string, error) { - serialized, err := entry.Logger.Formatter.Format(entry) + serialized, err := entry.Bytes() if err != nil { return "", err } @@ -103,7 +108,11 @@ func (entry *Entry) WithError(err error) *Entry { // Add a context to the Entry. func (entry *Entry) WithContext(ctx context.Context) *Entry { - return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx} + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} } // Add a single field to the Entry. @@ -144,7 +153,11 @@ func (entry *Entry) WithFields(fields Fields) *Entry { // Overrides the time of the Entry. func (entry *Entry) WithTime(t time.Time) *Entry { - return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context} + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} } // getPackageName reduces a fully qualified function name to the package name @@ -165,15 +178,20 @@ func getPackageName(f string) string { // getCaller retrieves the name of the first non-logrus calling function func getCaller() *runtime.Frame { - // cache this package's fully-qualified name callerInitOnce.Do(func() { - pcs := make([]uintptr, 2) + pcs := make([]uintptr, maximumCallerDepth) _ = runtime.Callers(0, pcs) - logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name()) - // now that we have the cache, we can skip a minimum count of known-logrus functions - // XXX this is dubious, the number of frames may vary + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage 
= getPackageName(funcName) + break + } + } + minimumCallerDepth = knownLogrusFrames }) @@ -187,7 +205,7 @@ func getCaller() *runtime.Frame { // If the caller isn't part of this package, we're done if pkg != logrusPackage { - return &f + return &f //nolint:scopelint } } @@ -217,9 +235,11 @@ func (entry Entry) log(level Level, msg string) { entry.Level = level entry.Message = msg + entry.Logger.mu.Lock() if entry.Logger.ReportCaller { entry.Caller = getCaller() } + entry.Logger.mu.Unlock() entry.fireHooks() @@ -255,11 +275,10 @@ func (entry *Entry) write() { serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - } else { - _, err = entry.Logger.Out.Write(serialized) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } + return + } + if _, err = entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index 62fc2f2193cd..42b04f6c8094 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -80,7 +80,7 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } -// WithTime creats an entry from the standard logger and overrides the time of +// WithTime creates an entry from the standard logger and overrides the time of // logs generated with it. 
// // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod index 12fdf9898474..d41329679f83 100644 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -2,9 +2,10 @@ module github.com/sirupsen/logrus require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/konsorten/go-windows-terminal-sequences v1.0.3 github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.1.1 // indirect github.com/stretchr/testify v1.2.2 golang.org/x/sys v0.0.0-20190422165155-953cdadca894 ) + +go 1.13 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum index 596c318b9f70..49c690f23837 100644 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -1,16 +1,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index 098a21a06795..ba7f237112bd 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -28,6 +28,9 @@ type JSONFormatter struct { // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. DataKey string @@ -110,6 +113,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { } encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) if f.PrettyPrint { encoder.SetIndent("", " ") } diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index c0c0b1e5590a..6fdda748e4db 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -68,10 +68,10 @@ func (mw *MutexWrap) Disable() { // `Out` and `Hooks` directly on the default logger instance. 
You can also just // instantiate your own: // -// var log = &Logger{ +// var log = &logrus.Logger{ // Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), +// Formatter: new(logrus.JSONFormatter), +// Hooks: make(logrus.LevelHooks), // Level: logrus.DebugLevel, // } // @@ -100,8 +100,9 @@ func (logger *Logger) releaseEntry(entry *Entry) { logger.entryPool.Put(entry) } -// Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go index 8644761f73cf..2f16224cb9ff 100644 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -51,7 +51,7 @@ func (level *Level) UnmarshalText(text []byte) error { return err } - *level = Level(l) + *level = l return nil } diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go index 3c4f43f91cd1..499789984d2b 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -1,4 +1,5 @@ // +build darwin dragonfly freebsd netbsd openbsd +// +build !js package logrus @@ -10,4 +11,3 @@ func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil } - diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 000000000000..ebdae3ec6262 --- /dev/null +++ 
b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index 355dc966f00b..cc4fe6e31776 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,4 +1,5 @@ // +build linux aix +// +build !js package logrus @@ -10,4 +11,3 @@ func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil } - diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index e01587c437de..3c28b54cabae 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -6,9 +6,11 @@ import ( "os" "runtime" "sort" + "strconv" "strings" "sync" "time" + "unicode/utf8" ) const ( @@ -32,6 +34,14 @@ type TextFormatter struct { // Force disabling colors. DisableColors bool + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ EnvironmentOverrideColors bool @@ -57,6 +67,10 @@ type TextFormatter struct { // Disables the truncation of the level text to 4 characters. 
DisableLevelTruncation bool + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool @@ -79,23 +93,32 @@ type TextFormatter struct { CallerPrettyfier func(*runtime.Frame) (function string, file string) terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int } func (f *TextFormatter) init(entry *Entry) { if entry.Logger != nil { f.isTerminal = checkIfTerminal(entry.Logger.Out) } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } } func (f *TextFormatter) isColored() bool { isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) if f.EnvironmentOverrideColors { - if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": isColored = true - } else if ok && force == "0" { - isColored = false - } else if os.Getenv("CLICOLOR") == "0" { + case ok && force == "0", os.Getenv("CLICOLOR") == "0": isColored = false } } @@ -217,9 +240,18 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } levelText := strings.ToUpper(entry.Level.String()) - if !f.DisableLevelTruncation { + if !f.DisableLevelTruncation && !f.PadLevelText { levelText = levelText[0:4] } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. 
+ formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } // Remove a single newline if it already exists in the message to keep // the behavior of logrus text_formatter the same as the stdlib log package @@ -243,11 +275,12 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } } - if f.DisableTimestamp { + switch { + case f.DisableTimestamp: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) - } else if !f.FullTimestamp { + case !f.FullTimestamp: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) - } else { + default: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { @@ -258,9 +291,15 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } if f.QuoteEmptyFields && len(text) == 0 { return true } + if f.DisableQuote { + return false + } for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 9e1f7513597d..72e8e3a1b65f 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -6,10 +6,16 @@ import ( "runtime" ) +// Writer at INFO level. See WriterLevel for details. func (logger *Logger) Writer() *io.PipeWriter { return logger.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. 
Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } diff --git a/vendor/modules.txt b/vendor/modules.txt index 7c59f9bc4f90..43f7aa065c3e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -204,7 +204,7 @@ github.com/cloudfoundry-community/go-cfclient github.com/cockroachdb/cockroach-go/crdb # github.com/containerd/containerd v1.3.4 github.com/containerd/containerd/errdefs -# github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc +# github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe github.com/containerd/continuity/fs github.com/containerd/continuity/pathdriver github.com/containerd/continuity/sysx @@ -214,6 +214,14 @@ github.com/coreos/go-oidc github.com/coreos/go-semver/semver # github.com/coreos/go-systemd/v22 v22.0.0 github.com/coreos/go-systemd/v22/journal +# github.com/couchbase/gocb/v2 v2.1.4 +github.com/couchbase/gocb/v2 +github.com/couchbase/gocb/v2/search +# github.com/couchbase/gocbcore/v9 v9.0.4 +github.com/couchbase/gocbcore/v9 +github.com/couchbase/gocbcore/v9/connstr +github.com/couchbase/gocbcore/v9/memd +github.com/couchbase/gocbcore/v9/scram # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew # github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc @@ -416,7 +424,7 @@ github.com/hashicorp/go-sockaddr/template github.com/hashicorp/go-syslog # github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/go-uuid -# github.com/hashicorp/go-version v1.2.0 +# github.com/hashicorp/go-version v1.2.1 github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v0.5.3 github.com/hashicorp/golang-lru @@ -468,6 +476,8 @@ github.com/hashicorp/vault-plugin-auth-kerberos 
github.com/hashicorp/vault-plugin-auth-kubernetes # github.com/hashicorp/vault-plugin-auth-oci v0.5.5 github.com/hashicorp/vault-plugin-auth-oci +# github.com/hashicorp/vault-plugin-database-couchbase v0.1.0 +github.com/hashicorp/vault-plugin-database-couchbase # github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4 github.com/hashicorp/vault-plugin-database-elasticsearch # github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.2 @@ -639,13 +649,13 @@ github.com/keybase/go-crypto/openpgp/errors github.com/keybase/go-crypto/openpgp/packet github.com/keybase/go-crypto/openpgp/s2k github.com/keybase/go-crypto/rsa -# github.com/konsorten/go-windows-terminal-sequences v1.0.1 +# github.com/konsorten/go-windows-terminal-sequences v1.0.3 github.com/konsorten/go-windows-terminal-sequences # github.com/kr/pretty v0.2.0 github.com/kr/pretty # github.com/kr/text v0.2.0 github.com/kr/text -# github.com/lib/pq v1.2.0 +# github.com/lib/pq v1.8.0 github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram @@ -675,7 +685,7 @@ github.com/mitchellh/gox github.com/mitchellh/hashstructure # github.com/mitchellh/iochan v1.0.0 github.com/mitchellh/iochan -# github.com/mitchellh/mapstructure v1.3.2 +# github.com/mitchellh/mapstructure v1.3.3 github.com/mitchellh/mapstructure # github.com/mitchellh/pointerstructure v1.0.0 github.com/mitchellh/pointerstructure @@ -803,7 +813,7 @@ github.com/shirou/gopsutil/internal/common github.com/shirou/gopsutil/mem github.com/shirou/gopsutil/net github.com/shirou/gopsutil/process -# github.com/sirupsen/logrus v1.4.2 +# github.com/sirupsen/logrus v1.6.0 github.com/sirupsen/logrus # github.com/stretchr/testify v1.5.1 github.com/stretchr/testify/assert