diff --git a/command/base_predict_test.go b/command/base_predict_test.go index 2a896a55bd38..8401fbe22c17 100644 --- a/command/base_predict_test.go +++ b/command/base_predict_test.go @@ -390,6 +390,7 @@ func TestPredict_Plugins(t *testing.T) { "rabbitmq", "radius", "redshift-database-plugin", + "snowflake-database-plugin", "ssh", "totp", "transform", diff --git a/go.mod b/go.mod index 498cb972a00e..b496d4fa502c 100644 --- a/go.mod +++ b/go.mod @@ -85,6 +85,7 @@ require ( github.com/hashicorp/vault-plugin-database-couchbase v0.2.1 github.com/hashicorp/vault-plugin-database-elasticsearch v0.6.1 github.com/hashicorp/vault-plugin-database-mongodbatlas v0.2.1 + github.com/hashicorp/vault-plugin-database-snowflake v0.1.0 github.com/hashicorp/vault-plugin-mock v0.16.1 github.com/hashicorp/vault-plugin-secrets-ad v0.8.0 github.com/hashicorp/vault-plugin-secrets-alicloud v0.7.0 diff --git a/go.sum b/go.sum index 72fd8eadf5bf..352e574e2a68 100644 --- a/go.sum +++ b/go.sum @@ -145,6 +145,8 @@ github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k= @@ -275,6 +277,8 @@ github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 h1:lrWnAyy/F72 github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ko= github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= @@ -453,6 +457,8 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -648,6 +654,8 @@ github.com/hashicorp/vault-plugin-database-elasticsearch v0.6.1 h1:C3NF3pVF7/Emx github.com/hashicorp/vault-plugin-database-elasticsearch v0.6.1/go.mod h1:813Nvr1IQqAKdlk3yIY97M5WyxMhWOrXtYioPf9PqJg= github.com/hashicorp/vault-plugin-database-mongodbatlas v0.2.1 h1:Yc8ZJJINvCH6JcJ8uvNkZ6W33KYzVdG4zI98dvbQ8lE= github.com/hashicorp/vault-plugin-database-mongodbatlas v0.2.1/go.mod h1:2So20ndRRsDAMDyG52az6nd7NwFOZTQER9EsrgPCgVg= +github.com/hashicorp/vault-plugin-database-snowflake v0.1.0 h1:SfzEIHgPqaoDfSN8Key2yLXZfUI3AhI45OEn5jXN8Eo= +github.com/hashicorp/vault-plugin-database-snowflake v0.1.0/go.mod h1:EUOuoWLg+RwiIjxydK+hH7Eeb4x66cESYNmTHLqqbuM= github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0= github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM= github.com/hashicorp/vault-plugin-secrets-ad v0.8.0 h1:SBOxEjYtxkipmp8AC7Yy3vjBVM5H24YM48WKCSCV+zA= @@ -953,6 +961,8 @@ github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1053,6 +1063,10 @@ github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:X github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce h1:CGR1hXCOeoZ1aJhCs8qdKJuEu3xoZnxsLcYoh5Bnr+4= +github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce/go.mod h1:EB/w24pR5VKI60ecFnKqXzxX3dOorz1rnVicQTQrGM0= +github.com/snowflakedb/gosnowflake v1.3.11 h1:4VATaWPZv2HEh9bkZG5LaMux4WRiZJDu/PvvMCzrpUg= +github.com/snowflakedb/gosnowflake v1.3.11/go.mod h1:+BMe9ivHWpzcXbM1qSIxWZ8qpWGBBaA46o9Z1qSfrNg= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= @@ -1085,6 +1099,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= 
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1183,6 +1198,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go index 364bfc10a364..037773aeb33a 100644 --- a/helper/builtinplugins/registry.go +++ b/helper/builtinplugins/registry.go @@ -10,11 +10,18 @@ import ( credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" credKube "github.com/hashicorp/vault-plugin-auth-kubernetes" credOCI "github.com/hashicorp/vault-plugin-auth-oci" - "github.com/hashicorp/vault/sdk/database/helper/credsutil" - dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase" dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch" dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas" + dbSnowflake "github.com/hashicorp/vault-plugin-database-snowflake" + logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin" + logicalAlicloud "github.com/hashicorp/vault-plugin-secrets-alicloud" + logicalAzure "github.com/hashicorp/vault-plugin-secrets-azure" + logicalGcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin" + logicalGcpKms "github.com/hashicorp/vault-plugin-secrets-gcpkms" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas" + logicalOpenLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" credAppId "github.com/hashicorp/vault/builtin/credential/app-id" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" credAws "github.com/hashicorp/vault/builtin/credential/aws" @@ -24,25 +31,6 @@ import ( credOkta "github.com/hashicorp/vault/builtin/credential/okta" credRadius "github.com/hashicorp/vault/builtin/credential/radius" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" - dbCass "github.com/hashicorp/vault/plugins/database/cassandra" - dbHana "github.com/hashicorp/vault/plugins/database/hana" - dbInflux "github.com/hashicorp/vault/plugins/database/influxdb" - dbMongo "github.com/hashicorp/vault/plugins/database/mongodb" - dbMssql "github.com/hashicorp/vault/plugins/database/mssql" - dbMysql "github.com/hashicorp/vault/plugins/database/mysql" - dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" - dbRedshift "github.com/hashicorp/vault/plugins/database/redshift" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/logical" - - 
logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin" - logicalAlicloud "github.com/hashicorp/vault-plugin-secrets-alicloud" - logicalAzure "github.com/hashicorp/vault-plugin-secrets-azure" - logicalGcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin" - logicalGcpKms "github.com/hashicorp/vault-plugin-secrets-gcpkms" - logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" - logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas" - logicalOpenLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" logicalAws "github.com/hashicorp/vault/builtin/logical/aws" logicalCass "github.com/hashicorp/vault/builtin/logical/cassandra" logicalConsul "github.com/hashicorp/vault/builtin/logical/consul" @@ -56,6 +44,17 @@ import ( logicalSsh "github.com/hashicorp/vault/builtin/logical/ssh" logicalTotp "github.com/hashicorp/vault/builtin/logical/totp" logicalTransit "github.com/hashicorp/vault/builtin/logical/transit" + dbCass "github.com/hashicorp/vault/plugins/database/cassandra" + dbHana "github.com/hashicorp/vault/plugins/database/hana" + dbInflux "github.com/hashicorp/vault/plugins/database/influxdb" + dbMongo "github.com/hashicorp/vault/plugins/database/mongodb" + dbMssql "github.com/hashicorp/vault/plugins/database/mssql" + dbMysql "github.com/hashicorp/vault/plugins/database/mysql" + dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" + dbRedshift "github.com/hashicorp/vault/plugins/database/redshift" + "github.com/hashicorp/vault/sdk/database/helper/credsutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" ) // Registry is inherently thread-safe because it's immutable. @@ -110,6 +109,7 @@ func newRegistry() *registry { "mssql-database-plugin": dbMssql.New, "postgresql-database-plugin": dbPostgres.New, "redshift-database-plugin": dbRedshift.New, + "snowflake-database-plugin": dbSnowflake.New, }, logicalBackends: map[string]logical.Factory{ "ad": logicalAd.Factory, diff --git a/vault/plugin_catalog_test.go b/vault/plugin_catalog_test.go index 2a56cfe2e4c0..3af87854b542 100644 --- a/vault/plugin_catalog_test.go +++ b/vault/plugin_catalog_test.go @@ -3,6 +3,8 @@ package vault import ( "context" "fmt" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "io/ioutil" "os" "path/filepath" @@ -11,8 +13,6 @@ import ( "testing" "github.com/hashicorp/vault/helper/builtinplugins" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/pluginutil" ) func TestPluginCatalog_CRUD(t *testing.T) { diff --git a/vault/testing.go b/vault/testing.go index e8188dfa07fc..fabf2308e8a7 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -2089,6 +2089,7 @@ func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string { "mssql-database-plugin", "postgresql-database-plugin", "redshift-database-plugin", + "snowflake-database-plugin", } } diff --git a/vendor/github.com/apache/arrow/go/arrow/.editorconfig b/vendor/github.com/apache/arrow/go/arrow/.editorconfig new file mode 100644 index 000000000000..a7ceaf938f92 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/.editorconfig @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +root = true + +[*.tmpl] +indent_style = tab +indent_size = 4 \ No newline at end of file diff --git a/vendor/github.com/apache/arrow/go/arrow/.gitignore b/vendor/github.com/apache/arrow/go/arrow/.gitignore new file mode 100644 index 000000000000..d4b831ae811d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/.gitignore @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +### Go template +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +*.o + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +bin/ +vendor/ \ No newline at end of file diff --git a/vendor/github.com/apache/arrow/go/arrow/Gopkg.lock b/vendor/github.com/apache/arrow/go/arrow/Gopkg.lock new file mode 100644 index 000000000000..143e4f93b5ee --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/Gopkg.lock @@ -0,0 +1,44 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:2d0dc026c4aef5e2f3a0e06a4dabe268b840d8f63190cf6894e02134a03f52c5" + name = "github.com/stretchr/testify" + packages = ["assert"] + pruneopts = "" + revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" + version = "v1.2.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/pkg/errors", + "github.com/stretchr/testify/assert", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/apache/arrow/go/arrow/Gopkg.toml b/vendor/github.com/apache/arrow/go/arrow/Gopkg.toml new file mode 100644 index 000000000000..b27807d69f95 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/Gopkg.toml @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.2.0" + +[[constraint]] + name = "github.com/pkg/errors" + version = "0.8.1" \ No newline at end of file diff --git a/vendor/github.com/apache/arrow/go/arrow/Makefile b/vendor/github.com/apache/arrow/go/arrow/Makefile new file mode 100644 index 000000000000..9c4a23262d0b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/Makefile @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
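Reviewer aside on the `helper/builtinplugins/registry.go` hunk later in this diff — an illustrative sketch, not part of the patch, of how the newly registered builtin can be resolved. It assumes the exported `builtinplugins.Registry` with `Contains`/`Keys` helpers as they exist on this branch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/builtinplugins"
	"github.com/hashicorp/vault/sdk/helper/consts"
)

func main() {
	// After this change, the Snowflake factory is resolvable as a builtin
	// database plugin under the name registered in newRegistry().
	ok := builtinplugins.Registry.Contains("snowflake-database-plugin", consts.PluginTypeDatabase)
	fmt.Println("snowflake builtin registered:", ok)

	// Keys mirrors the lists asserted in TestPredict_Plugins and
	// mockBuiltinRegistry.Keys in vault/testing.go.
	for _, name := range builtinplugins.Registry.Keys(consts.PluginTypeDatabase) {
		fmt.Println(name)
	}
}
```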
+ +GO_BUILD=go build +GO_GEN=go generate +GO_TEST?=go test +GOPATH=$(realpath ../../../../../..) + +GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go') +ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go') +SOURCES_NO_VENDOR := $(shell find . -path ./vendor -prune -o -name "*.go" -not -name '*_test.go' -print) + +.PHONEY: test bench assembly generate + +assembly: + @$(MAKE) -C memory assembly + @$(MAKE) -C math assembly + +generate: bin/tmpl + bin/tmpl -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen_test.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl + bin/tmpl -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl + @$(MAKE) -C math generate + +fmt: $(SOURCES_NO_VENDOR) + goimports -w $^ + +bench: $(GO_SOURCES) | assembly + $(GO_TEST) $(GO_TEST_ARGS) -bench=. -run=- ./... + +bench-noasm: $(GO_SOURCES) + $(GO_TEST) $(GO_TEST_ARGS) -tags='noasm' -bench=. -run=- ./... + +test: $(GO_SOURCES) | assembly + $(GO_TEST) $(GO_TEST_ARGS) ./... + +test-noasm: $(GO_SOURCES) + $(GO_TEST) $(GO_TEST_ARGS) -tags='noasm' ./... + +bin/tmpl: _tools/tmpl/main.go + $(GO_BUILD) -o $@ ./_tools/tmpl + diff --git a/vendor/github.com/apache/arrow/go/arrow/array/array.go b/vendor/github.com/apache/arrow/go/arrow/array/array.go new file mode 100644 index 000000000000..9cbaef9ff123 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/array.go @@ -0,0 +1,208 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array // import "github.com/apache/arrow/go/arrow/array" + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" +) + +// A type which satisfies array.Interface represents an immutable sequence of values. +type Interface interface { + // DataType returns the type metadata for this instance. + DataType() arrow.DataType + + // NullN returns the number of null values in the array. + NullN() int + + // NullBitmapBytes returns a byte slice of the validity bitmap. + NullBitmapBytes() []byte + + // IsNull returns true if value at index is null. + // NOTE: IsNull will panic if NullBitmapBytes is not empty and 0 > i ≥ Len. + IsNull(i int) bool + + // IsValid returns true if value at index is not null. + // NOTE: IsValid will panic if NullBitmapBytes is not empty and 0 > i ≥ Len. + IsValid(i int) bool + + Data() *Data + + // Len returns the number of elements in the array. + Len() int + + // Retain increases the reference count by 1. + // Retain may be called simultaneously from multiple goroutines. 
+ Retain() + + // Release decreases the reference count by 1. + // Release may be called simultaneously from multiple goroutines. + // When the reference count goes to zero, the memory is freed. + Release() +} + +const ( + // UnknownNullCount specifies the NullN should be calculated from the null bitmap buffer. + UnknownNullCount = -1 +) + +type array struct { + refCount int64 + data *Data + nullBitmapBytes []byte +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (a *array) Retain() { + atomic.AddInt64(&a.refCount, 1) +} + +// Release decreases the reference count by 1. +// Release may be called simultaneously from multiple goroutines. +// When the reference count goes to zero, the memory is freed. +func (a *array) Release() { + debug.Assert(atomic.LoadInt64(&a.refCount) > 0, "too many releases") + + if atomic.AddInt64(&a.refCount, -1) == 0 { + a.data.Release() + a.data, a.nullBitmapBytes = nil, nil + } +} + +// DataType returns the type metadata for this instance. +func (a *array) DataType() arrow.DataType { return a.data.dtype } + +// NullN returns the number of null values in the array. +func (a *array) NullN() int { + if a.data.nulls < 0 { + a.data.nulls = a.data.length - bitutil.CountSetBits(a.nullBitmapBytes, a.data.offset, a.data.length) + } + return a.data.nulls +} + +// NullBitmapBytes returns a byte slice of the validity bitmap. +func (a *array) NullBitmapBytes() []byte { return a.nullBitmapBytes } + +func (a *array) Data() *Data { return a.data } + +// Len returns the number of elements in the array. +func (a *array) Len() int { return a.data.length } + +// IsNull returns true if value at index is null. +// NOTE: IsNull will panic if NullBitmapBytes is not empty and 0 > i ≥ Len. +func (a *array) IsNull(i int) bool { + return len(a.nullBitmapBytes) != 0 && bitutil.BitIsNotSet(a.nullBitmapBytes, a.data.offset+i) +} + +// IsValid returns true if value at index is not null. +// NOTE: IsValid will panic if NullBitmapBytes is not empty and 0 > i ≥ Len. +func (a *array) IsValid(i int) bool { + return len(a.nullBitmapBytes) == 0 || bitutil.BitIsSet(a.nullBitmapBytes, a.data.offset+i) +} + +func (a *array) setData(data *Data) { + // Retain before releasing in case a.data is the same as data. + data.Retain() + + if a.data != nil { + a.data.Release() + } + + if len(data.buffers) > 0 && data.buffers[0] != nil { + a.nullBitmapBytes = data.buffers[0].Bytes() + } + a.data = data +} + +func (a *array) Offset() int { + return a.data.Offset() +} + +type arrayConstructorFn func(*Data) Interface + +var ( + makeArrayFn [32]arrayConstructorFn +) + +func unsupportedArrayType(data *Data) Interface { + panic("unsupported data type: " + data.dtype.ID().String()) +} + +func invalidDataType(data *Data) Interface { + panic("invalid data type: " + data.dtype.ID().String()) +} + +// MakeFromData constructs a strongly-typed array instance from generic Data. +func MakeFromData(data *Data) Interface { + return makeArrayFn[byte(data.dtype.ID()&0x1f)](data) +} + +// NewSlice constructs a zero-copy slice of the array with the indicated +// indices i and j, corresponding to array[i:j]. +// The returned array must be Release()'d after use. +// +// NewSlice panics if the slice is outside the valid range of the input array. +// NewSlice panics if j < i. 
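A usage sketch (not part of the diff) for the Retain/Release and NewSlice contracts documented above; the Int64 builder comes from the generated numeric builders, which are not shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()

	b := array.NewInt64Builder(mem)
	defer b.Release()
	b.AppendValues([]int64{1, 2, 3, 4}, nil)

	arr := b.NewInt64Array()
	defer arr.Release()

	// Zero-copy view of arr[1:3]; the slice holds its own reference
	// and must be released independently of arr.
	slice := array.NewSlice(arr, 1, 3)
	defer slice.Release()

	fmt.Println(slice) // [2 3]
}
```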
+func NewSlice(arr Interface, i, j int64) Interface { + data := NewSliceData(arr.Data(), i, j) + slice := MakeFromData(data) + data.Release() + return slice +} + +func init() { + makeArrayFn = [...]arrayConstructorFn{ + arrow.NULL: func(data *Data) Interface { return NewNullData(data) }, + arrow.BOOL: func(data *Data) Interface { return NewBooleanData(data) }, + arrow.UINT8: func(data *Data) Interface { return NewUint8Data(data) }, + arrow.INT8: func(data *Data) Interface { return NewInt8Data(data) }, + arrow.UINT16: func(data *Data) Interface { return NewUint16Data(data) }, + arrow.INT16: func(data *Data) Interface { return NewInt16Data(data) }, + arrow.UINT32: func(data *Data) Interface { return NewUint32Data(data) }, + arrow.INT32: func(data *Data) Interface { return NewInt32Data(data) }, + arrow.UINT64: func(data *Data) Interface { return NewUint64Data(data) }, + arrow.INT64: func(data *Data) Interface { return NewInt64Data(data) }, + arrow.FLOAT16: func(data *Data) Interface { return NewFloat16Data(data) }, + arrow.FLOAT32: func(data *Data) Interface { return NewFloat32Data(data) }, + arrow.FLOAT64: func(data *Data) Interface { return NewFloat64Data(data) }, + arrow.STRING: func(data *Data) Interface { return NewStringData(data) }, + arrow.BINARY: func(data *Data) Interface { return NewBinaryData(data) }, + arrow.FIXED_SIZE_BINARY: func(data *Data) Interface { return NewFixedSizeBinaryData(data) }, + arrow.DATE32: func(data *Data) Interface { return NewDate32Data(data) }, + arrow.DATE64: func(data *Data) Interface { return NewDate64Data(data) }, + arrow.TIMESTAMP: func(data *Data) Interface { return NewTimestampData(data) }, + arrow.TIME32: func(data *Data) Interface { return NewTime32Data(data) }, + arrow.TIME64: func(data *Data) Interface { return NewTime64Data(data) }, + arrow.INTERVAL: func(data *Data) Interface { return NewIntervalData(data) }, + arrow.DECIMAL: func(data *Data) Interface { return NewDecimal128Data(data) }, + arrow.LIST: func(data *Data) Interface { return NewListData(data) }, + arrow.STRUCT: func(data *Data) Interface { return NewStructData(data) }, + arrow.UNION: unsupportedArrayType, + arrow.DICTIONARY: unsupportedArrayType, + arrow.MAP: unsupportedArrayType, + arrow.EXTENSION: unsupportedArrayType, + arrow.FIXED_SIZE_LIST: func(data *Data) Interface { return NewFixedSizeListData(data) }, + arrow.DURATION: func(data *Data) Interface { return NewDurationData(data) }, + + // invalid data types to fill out array size 2⁵-1 + 31: invalidDataType, + } +} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/binary.go b/vendor/github.com/apache/arrow/go/arrow/array/binary.go new file mode 100644 index 000000000000..ed58910919a6 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/binary.go @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "strings" + "unsafe" + + "github.com/apache/arrow/go/arrow" +) + +// A type which represents an immutable sequence of variable-length binary strings. +type Binary struct { + array + valueOffsets []int32 + valueBytes []byte +} + +// NewBinaryData constructs a new Binary array from data. +func NewBinaryData(data *Data) *Binary { + a := &Binary{} + a.refCount = 1 + a.setData(data) + return a +} + +// Value returns the slice at index i. This value should not be mutated. +func (a *Binary) Value(i int) []byte { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + idx := a.array.data.offset + i + return a.valueBytes[a.valueOffsets[idx]:a.valueOffsets[idx+1]] +} + +// ValueString returns the string at index i without performing additional allocations. +// The string is only valid for the lifetime of the Binary array. +func (a *Binary) ValueString(i int) string { + b := a.Value(i) + return *(*string)(unsafe.Pointer(&b)) +} + +func (a *Binary) ValueOffset(i int) int { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + return int(a.valueOffsets[a.array.data.offset+i]) +} + +func (a *Binary) ValueLen(i int) int { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + beg := a.array.data.offset + i + return int(a.valueOffsets[beg+1] - a.valueOffsets[beg]) +} + +func (a *Binary) ValueOffsets() []int32 { + beg := a.array.data.offset + end := beg + a.array.data.length + 1 + return a.valueOffsets[beg:end] +} + +func (a *Binary) ValueBytes() []byte { + beg := a.array.data.offset + end := beg + a.array.data.length + return a.valueBytes[a.valueOffsets[beg]:a.valueOffsets[end]] +} + +func (a *Binary) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%q", a.ValueString(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Binary) setData(data *Data) { + if len(data.buffers) != 3 { + panic("len(data.buffers) != 3") + } + + a.array.setData(data) + + if valueData := data.buffers[2]; valueData != nil { + a.valueBytes = valueData.Bytes() + } + + if valueOffsets := data.buffers[1]; valueOffsets != nil { + a.valueOffsets = arrow.Int32Traits.CastFromBytes(valueOffsets.Bytes()) + } +} + +func arrayEqualBinary(left, right *Binary) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if bytes.Compare(left.Value(i), right.Value(i)) != 0 { + return false + } + } + return true +} + +var ( + _ Interface = (*Binary)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/binarybuilder.go b/vendor/github.com/apache/arrow/go/arrow/array/binarybuilder.go new file mode 100644 index 000000000000..17562fcc7653 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/binarybuilder.go @@ -0,0 +1,217 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "math" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +const ( + binaryArrayMaximumCapacity = math.MaxInt32 +) + +// A BinaryBuilder is used to build a Binary array using the Append methods. +type BinaryBuilder struct { + builder + + dtype arrow.BinaryDataType + offsets *int32BufferBuilder + values *byteBufferBuilder +} + +func NewBinaryBuilder(mem memory.Allocator, dtype arrow.BinaryDataType) *BinaryBuilder { + b := &BinaryBuilder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + offsets: newInt32BufferBuilder(mem), + values: newByteBufferBuilder(mem), + } + return b +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (b *BinaryBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.offsets != nil { + b.offsets.Release() + b.offsets = nil + } + if b.values != nil { + b.values.Release() + b.values = nil + } + } +} + +func (b *BinaryBuilder) Append(v []byte) { + b.Reserve(1) + b.appendNextOffset() + b.values.Append(v) + b.UnsafeAppendBoolToBitmap(true) +} + +func (b *BinaryBuilder) AppendString(v string) { + b.Append([]byte(v)) +} + +func (b *BinaryBuilder) AppendNull() { + b.Reserve(1) + b.appendNextOffset() + b.UnsafeAppendBoolToBitmap(false) +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *BinaryBuilder) AppendValues(v [][]byte, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + for _, vv := range v { + b.appendNextOffset() + b.values.Append(vv) + } + + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +// AppendStringValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. 
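A short sketch (not part of the diff) of the valid-mask semantics described above for AppendValues/AppendStringValues, assuming `arrow.BinaryTypes.Binary` from the vendored package:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	bb := array.NewBinaryBuilder(memory.NewGoAllocator(), arrow.BinaryTypes.Binary)
	defer bb.Release()

	// valid[i] == false marks v[i] as null; an empty valid slice would
	// instead mark every appended value as valid.
	bb.AppendValues([][]byte{[]byte("a"), nil, []byte("c")}, []bool{true, false, true})

	arr := bb.NewBinaryArray()
	defer arr.Release()

	fmt.Println(arr) // ["a" (null) "c"]
}
```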
+func (b *BinaryBuilder) AppendStringValues(v []string, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + for _, vv := range v { + b.appendNextOffset() + b.values.Append([]byte(vv)) + } + + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *BinaryBuilder) Value(i int) []byte { + offsets := b.offsets.Values() + start := int(offsets[i]) + var end int + if i == (b.length - 1) { + end = b.values.Len() + } else { + end = int(offsets[i+1]) + } + return b.values.Bytes()[start:end] +} + +func (b *BinaryBuilder) init(capacity int) { + b.builder.init(capacity) + b.offsets.resize((capacity + 1) * arrow.Int32SizeBytes) +} + +// DataLen returns the number of bytes in the data array. +func (b *BinaryBuilder) DataLen() int { return b.values.length } + +// DataCap returns the total number of bytes that can be stored +// without allocating additional memory. +func (b *BinaryBuilder) DataCap() int { return b.values.capacity } + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *BinaryBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// ReserveData ensures there is enough space for appending n bytes +// by checking the capacity and resizing the data buffer if necessary. +func (b *BinaryBuilder) ReserveData(n int) { + if b.values.capacity < b.values.length+n { + b.values.resize(b.values.Len() + n) + } +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may be reduced. +func (b *BinaryBuilder) Resize(n int) { + b.offsets.resize((n + 1) * arrow.Int32SizeBytes) + b.builder.resize(n, b.init) +} + +// NewArray creates a Binary array from the memory buffers used by the builder and resets the BinaryBuilder +// so it can be used to build a new array. +func (b *BinaryBuilder) NewArray() Interface { + return b.NewBinaryArray() +} + +// NewBinaryArray creates a Binary array from the memory buffers used by the builder and resets the BinaryBuilder +// so it can be used to build a new array. +func (b *BinaryBuilder) NewBinaryArray() (a *Binary) { + data := b.newData() + a = NewBinaryData(data) + data.Release() + return +} + +func (b *BinaryBuilder) newData() (data *Data) { + b.appendNextOffset() + offsets, values := b.offsets.Finish(), b.values.Finish() + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, offsets, values}, nil, b.nulls, 0) + if offsets != nil { + offsets.Release() + } + + if values != nil { + values.Release() + } + + b.builder.reset() + + return +} + +func (b *BinaryBuilder) appendNextOffset() { + numBytes := b.values.Len() + // TODO(sgc): check binaryArrayMaximumCapacity? + b.offsets.AppendValue(int32(numBytes)) +} + +var ( + _ Builder = (*BinaryBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/boolean.go b/vendor/github.com/apache/arrow/go/arrow/array/boolean.go new file mode 100644 index 000000000000..e352e6e043b3 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/boolean.go @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/memory" +) + +// A type which represents an immutable sequence of boolean values. +type Boolean struct { + array + values []byte +} + +// NewBoolean creates a boolean array from the data memory.Buffer and contains length elements. +// The nullBitmap buffer can be nil of there are no null values. +// If nulls is not known, use UnknownNullCount to calculate the value of NullN at runtime from the nullBitmap buffer. +func NewBoolean(length int, data *memory.Buffer, nullBitmap *memory.Buffer, nulls int) *Boolean { + return NewBooleanData(NewData(arrow.FixedWidthTypes.Boolean, length, []*memory.Buffer{nullBitmap, data}, nil, nulls, 0)) +} + +func NewBooleanData(data *Data) *Boolean { + a := &Boolean{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *Boolean) Value(i int) bool { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + return bitutil.BitIsSet(a.values, a.array.data.offset+i) +} + +func (a *Boolean) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Boolean) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = vals.Bytes() + } +} + +func arrayEqualBoolean(left, right *Boolean) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +var ( + _ Interface = (*Boolean)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/booleanbuilder.go b/vendor/github.com/apache/arrow/go/arrow/array/booleanbuilder.go new file mode 100644 index 000000000000..4a38156849af --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/booleanbuilder.go @@ -0,0 +1,165 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
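The IsNull/NullN logic above is plain bitmap arithmetic; a sketch (not part of the diff) using the same bitutil helpers the vendored code calls:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/bitutil"
)

func main() {
	// One byte covers eight elements; set bits mark valid (non-null) slots.
	bitmap := make([]byte, 1)
	bitutil.SetBit(bitmap, 0)
	bitutil.SetBit(bitmap, 2)

	fmt.Println(bitutil.BitIsSet(bitmap, 1)) // false: element 1 is null

	// NullN recomputes exactly this when the count is UnknownNullCount.
	valid := bitutil.CountSetBits(bitmap, 0, 3)
	fmt.Println(3 - valid) // 1 null among 3 elements
}
```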
+ +package array + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +type BooleanBuilder struct { + builder + + data *memory.Buffer + rawData []byte +} + +func NewBooleanBuilder(mem memory.Allocator) *BooleanBuilder { + return &BooleanBuilder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (b *BooleanBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *BooleanBuilder) Append(v bool) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *BooleanBuilder) AppendByte(v byte) { + b.Reserve(1) + b.UnsafeAppend(v != 0) +} + +func (b *BooleanBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *BooleanBuilder) UnsafeAppend(v bool) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + if v { + bitutil.SetBit(b.rawData, b.length) + } else { + bitutil.ClearBit(b.rawData, b.length) + } + b.length++ +} + +func (b *BooleanBuilder) AppendValues(v []bool, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + for i, vv := range v { + bitutil.SetBitTo(b.rawData, b.length+i, vv) + } + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *BooleanBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.BooleanTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = b.data.Bytes() +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *BooleanBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *BooleanBuilder) Resize(n int) { + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(n, b.init) + b.data.Resize(arrow.BooleanTraits.BytesRequired(n)) + b.rawData = b.data.Bytes() + } +} + +// NewArray creates a Boolean array from the memory buffers used by the builder and resets the BooleanBuilder +// so it can be used to build a new array. +func (b *BooleanBuilder) NewArray() Interface { + return b.NewBooleanArray() +} + +// NewBooleanArray creates a Boolean array from the memory buffers used by the builder and resets the BooleanBuilder +// so it can be used to build a new array. 
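A minimal end-to-end sketch (not part of the diff) of the BooleanBuilder defined above:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	bb := array.NewBooleanBuilder(memory.NewGoAllocator())
	defer bb.Release()

	// A nil valid slice marks every appended value as non-null.
	bb.AppendValues([]bool{true, false, true}, nil)

	arr := bb.NewBooleanArray()
	defer arr.Release()

	fmt.Println(arr)         // [true false true]
	fmt.Println(arr.NullN()) // 0
}
```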
+func (b *BooleanBuilder) NewBooleanArray() (a *Boolean) { + data := b.newData() + a = NewBooleanData(data) + data.Release() + return +} + +func (b *BooleanBuilder) newData() *Data { + bytesRequired := arrow.BooleanTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + res := NewData(arrow.FixedWidthTypes.Boolean, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return res +} + +var ( + _ Builder = (*BooleanBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder.go b/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder.go new file mode 100644 index 000000000000..bcc7153b5598 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder.go @@ -0,0 +1,127 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// A bufferBuilder provides common functionality for populating memory with a sequence of type-specific values. +// Specialized implementations provide type-safe APIs for appending and accessing the memory. +type bufferBuilder struct { + refCount int64 + mem memory.Allocator + buffer *memory.Buffer + length int + capacity int + + bytes []byte +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (b *bufferBuilder) Retain() { + atomic.AddInt64(&b.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (b *bufferBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.buffer != nil { + b.buffer.Release() + b.buffer, b.bytes = nil, nil + } + } +} + +// Len returns the length of the memory buffer in bytes. +func (b *bufferBuilder) Len() int { return b.length } + +// Cap returns the total number of bytes that can be stored without allocating additional memory. +func (b *bufferBuilder) Cap() int { return b.capacity } + +// Bytes returns a slice of length b.Len(). +// The slice is only valid for use until the next buffer modification. That is, until the next call +// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next +// buffer modification. 
+func (b *bufferBuilder) Bytes() []byte { return b.bytes[:b.length] } + +func (b *bufferBuilder) resize(elements int) { + if b.buffer == nil { + b.buffer = memory.NewResizableBuffer(b.mem) + } + + b.buffer.Resize(elements) + oldCapacity := b.capacity + b.capacity = b.buffer.Cap() + b.bytes = b.buffer.Buf() + + if b.capacity > oldCapacity { + memory.Set(b.bytes[oldCapacity:], 0) + } +} + +// Advance increases the buffer by length and initializes the skipped bytes to zero. +func (b *bufferBuilder) Advance(length int) { + if b.capacity < b.length+length { + newCapacity := bitutil.NextPowerOf2(b.length + length) + b.resize(newCapacity) + } + b.length += length +} + +// Append appends the contents of v to the buffer, resizing it if necessary. +func (b *bufferBuilder) Append(v []byte) { + if b.capacity < b.length+len(v) { + newCapacity := bitutil.NextPowerOf2(b.length + len(v)) + b.resize(newCapacity) + } + b.unsafeAppend(v) +} + +// Reset returns the buffer to an empty state. Reset releases the memory and sets the length and capacity to zero. +func (b *bufferBuilder) Reset() { + if b.buffer != nil { + b.buffer.Release() + } + b.buffer, b.bytes = nil, nil + b.capacity, b.length = 0, 0 +} + +// Finish TODO(sgc) +func (b *bufferBuilder) Finish() (buffer *memory.Buffer) { + if b.length > 0 { + b.buffer.ResizeNoShrink(b.length) + } + buffer = b.buffer + b.buffer = nil + b.Reset() + return +} + +func (b *bufferBuilder) unsafeAppend(data []byte) { + copy(b.bytes[b.length:], data) + b.length += len(data) +} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_byte.go b/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_byte.go new file mode 100644 index 000000000000..f5f5445b7ac4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_byte.go @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import "github.com/apache/arrow/go/arrow/memory" + +type byteBufferBuilder struct { + bufferBuilder +} + +func newByteBufferBuilder(mem memory.Allocator) *byteBufferBuilder { + return &byteBufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}} +} + +func (b *byteBufferBuilder) Values() []byte { return b.Bytes() } +func (b *byteBufferBuilder) Value(i int) byte { return b.bytes[i] } diff --git a/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_numeric.gen.go b/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_numeric.gen.go new file mode 100644 index 000000000000..4cdf42685f90 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_numeric.gen.go @@ -0,0 +1,58 @@ +// Code generated by array/bufferbuilder_numeric.gen.go.tmpl. DO NOT EDIT. 
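The unexported bufferBuilder above delegates its allocation to memory.NewResizableBuffer; a sketch (not part of the diff) of that underlying buffer API. The exact capacity after Resize is allocator-dependent, so only a lower bound is asserted:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()

	buf := memory.NewResizableBuffer(mem)
	defer buf.Release()

	buf.Resize(10) // grows the backing allocation and sets Len to 10
	fmt.Println(buf.Len(), buf.Cap() >= 10) // 10 true
}
```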
+ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/memory" +) + +type int32BufferBuilder struct { + bufferBuilder +} + +func newInt32BufferBuilder(mem memory.Allocator) *int32BufferBuilder { + return &int32BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}} +} + +// AppendValues appends the contents of v to the buffer, growing the buffer as needed. +func (b *int32BufferBuilder) AppendValues(v []int32) { b.Append(arrow.Int32Traits.CastToBytes(v)) } + +// Values returns a slice of length b.Len(). +// The slice is only valid for use until the next buffer modification. That is, until the next call +// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next +// buffer modification. +func (b *int32BufferBuilder) Values() []int32 { return arrow.Int32Traits.CastFromBytes(b.Bytes()) } + +// Value returns the int32 element at the index i. Value will panic if i is negative or ≥ Len. +func (b *int32BufferBuilder) Value(i int) int32 { return b.Values()[i] } + +// Len returns the number of int32 elements in the buffer. +func (b *int32BufferBuilder) Len() int { return b.length / arrow.Int32SizeBytes } + +// AppendValue appends v to the buffer, growing the buffer as needed. +func (b *int32BufferBuilder) AppendValue(v int32) { + if b.capacity < b.length+arrow.Int32SizeBytes { + newCapacity := bitutil.NextPowerOf2(b.length + arrow.Int32SizeBytes) + b.resize(newCapacity) + } + arrow.Int32Traits.PutValue(b.bytes[b.length:], v) + b.length += arrow.Int32SizeBytes +} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_numeric.gen.go.tmpl new file mode 100644 index 000000000000..a0ff764767ee --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/bufferbuilder_numeric.gen.go.tmpl @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/memory" +) + +{{range .In}} +{{$TypeNamePrefix := .name}} +{{if .Opt.BufferBuilder}} +type {{$TypeNamePrefix}}BufferBuilder struct { + bufferBuilder +} + +func new{{.Name}}BufferBuilder(mem memory.Allocator) *{{$TypeNamePrefix}}BufferBuilder { + return &{{$TypeNamePrefix}}BufferBuilder{bufferBuilder:bufferBuilder{refCount: 1, mem:mem}} +} + +// AppendValues appends the contents of v to the buffer, growing the buffer as needed. +func (b *{{$TypeNamePrefix}}BufferBuilder) AppendValues(v []{{.Type}}) { b.Append(arrow.{{.Name}}Traits.CastToBytes(v)) } + +// Values returns a slice of length b.Len(). +// The slice is only valid for use until the next buffer modification. That is, until the next call +// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next +// buffer modification. +func (b *{{$TypeNamePrefix}}BufferBuilder) Values() []{{.Type}} { return arrow.{{.Name}}Traits.CastFromBytes(b.Bytes()) } + +// Value returns the {{.Type}} element at the index i. Value will panic if i is negative or ≥ Len. +func (b *{{$TypeNamePrefix}}BufferBuilder) Value(i int) {{.Type}} { return b.Values()[i] } + +// Len returns the number of {{.Type}} elements in the buffer. +func (b *{{$TypeNamePrefix}}BufferBuilder) Len() int { return b.length/arrow.{{.Name}}SizeBytes } + +// AppendValue appends v to the buffer, growing the buffer as needed. +func (b *{{$TypeNamePrefix}}BufferBuilder) AppendValue(v {{.Type}}) { + if b.capacity < b.length+arrow.{{.Name}}SizeBytes { + newCapacity := bitutil.NextPowerOf2(b.length + arrow.{{.Name}}SizeBytes) + b.resize(newCapacity) + } + arrow.{{.Name}}Traits.PutValue(b.bytes[b.length:], v) + b.length+=arrow.{{.Name}}SizeBytes +} +{{end}} +{{end}} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/builder.go b/vendor/github.com/apache/arrow/go/arrow/array/builder.go new file mode 100644 index 000000000000..bdd871611afc --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/builder.go @@ -0,0 +1,273 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/memory" +) + +const ( + minBuilderCapacity = 1 << 5 +) + +// Builder provides an interface to build arrow arrays. +type Builder interface { + // Retain increases the reference count by 1. + // Retain may be called simultaneously from multiple goroutines. 
+ Retain()
+
+ // Release decreases the reference count by 1.
+ Release()
+
+ // Len returns the number of elements in the array builder.
+ Len() int
+
+ // Cap returns the total number of elements that can be stored
+ // without allocating additional memory.
+ Cap() int
+
+ // NullN returns the number of null values in the array builder.
+ NullN() int
+
+ // AppendNull adds a new null value to the array being built.
+ AppendNull()
+
+ // Reserve ensures there is enough space for appending n elements
+ // by checking the capacity and calling Resize if necessary.
+ Reserve(n int)
+
+ // Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+ // additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+ Resize(n int)
+
+ // NewArray creates a new array from the memory buffers used
+ // by the builder and resets the Builder so it can be used to build
+ // a new array.
+ NewArray() Interface
+
+ init(capacity int)
+ resize(newBits int, init func(int))
+}
+
+// builder provides common functionality for managing the validity bitmap (nulls) when building arrays.
+type builder struct {
+ refCount int64
+ mem memory.Allocator
+ nullBitmap *memory.Buffer
+ nulls int
+ length int
+ capacity int
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (b *builder) Retain() {
+ atomic.AddInt64(&b.refCount, 1)
+}
+
+// Len returns the number of elements in the array builder.
+func (b *builder) Len() int { return b.length }
+
+// Cap returns the total number of elements that can be stored without allocating additional memory.
+func (b *builder) Cap() int { return b.capacity }
+
+// NullN returns the number of null values in the array builder.
+func (b *builder) NullN() int { return b.nulls }
+
+func (b *builder) init(capacity int) {
+ toAlloc := bitutil.CeilByte(capacity) / 8
+ b.nullBitmap = memory.NewResizableBuffer(b.mem)
+ b.nullBitmap.Resize(toAlloc)
+ b.capacity = capacity
+ memory.Set(b.nullBitmap.Buf(), 0)
+}
+
+func (b *builder) reset() {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+
+ b.nulls = 0
+ b.length = 0
+ b.capacity = 0
+}
+
+func (b *builder) resize(newBits int, init func(int)) {
+ if b.nullBitmap == nil {
+ init(newBits)
+ return
+ }
+
+ newBytesN := bitutil.CeilByte(newBits) / 8
+ oldBytesN := b.nullBitmap.Len()
+ b.nullBitmap.Resize(newBytesN)
+ b.capacity = newBits
+ if oldBytesN < newBytesN {
+ // TODO(sgc): necessary?
+ memory.Set(b.nullBitmap.Buf()[oldBytesN:], 0)
+ }
+ if newBits < b.length {
+ b.length = newBits
+ b.nulls = newBits - bitutil.CountSetBits(b.nullBitmap.Buf(), 0, newBits)
+ }
+}
+
+func (b *builder) reserve(elements int, resize func(int)) {
+ if b.length+elements > b.capacity {
+ newCap := bitutil.NextPowerOf2(b.length + elements)
+ resize(newCap)
+ }
+}
+
+// unsafeAppendBoolsToBitmap appends the contents of valid to the validity bitmap.
+// As an optimization, if the valid slice is empty, the next length bits will be set to valid (not null).
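+// For example (an editorial sketch, not upstream documentation): starting from
+// an empty builder, the call
+//
+//	b.unsafeAppendBoolsToBitmap([]bool{true, false, true}, 3)
+//
+// sets bits 0 and 2, leaves bit 1 clear, increments b.nulls by one, and
+// advances b.length to 3. The length argument is only consulted when valid
+// is empty.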
+func (b *builder) unsafeAppendBoolsToBitmap(valid []bool, length int) { + if len(valid) == 0 { + b.unsafeSetValid(length) + return + } + + byteOffset := b.length / 8 + bitOffset := byte(b.length % 8) + nullBitmap := b.nullBitmap.Bytes() + bitSet := nullBitmap[byteOffset] + + for _, v := range valid { + if bitOffset == 8 { + bitOffset = 0 + nullBitmap[byteOffset] = bitSet + byteOffset++ + bitSet = nullBitmap[byteOffset] + } + + if v { + bitSet |= bitutil.BitMask[bitOffset] + } else { + bitSet &= bitutil.FlippedBitMask[bitOffset] + b.nulls++ + } + bitOffset++ + } + + if bitOffset != 0 { + nullBitmap[byteOffset] = bitSet + } + b.length += len(valid) +} + +// unsafeSetValid sets the next length bits to valid in the validity bitmap. +func (b *builder) unsafeSetValid(length int) { + padToByte := min(8-(b.length%8), length) + if padToByte == 8 { + padToByte = 0 + } + bits := b.nullBitmap.Bytes() + for i := b.length; i < b.length+padToByte; i++ { + bitutil.SetBit(bits, i) + } + + start := (b.length + padToByte) / 8 + fastLength := (length - padToByte) / 8 + memory.Set(bits[start:start+fastLength], 0xff) + + newLength := b.length + length + // trailing bytes + for i := b.length + padToByte + (fastLength * 8); i < newLength; i++ { + bitutil.SetBit(bits, i) + } + + b.length = newLength +} + +func (b *builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +func newBuilder(mem memory.Allocator, dtype arrow.DataType) Builder { + // FIXME(sbinet): use a type switch on dtype instead? + switch dtype.ID() { + case arrow.NULL: + case arrow.BOOL: + return NewBooleanBuilder(mem) + case arrow.UINT8: + return NewUint8Builder(mem) + case arrow.INT8: + return NewInt8Builder(mem) + case arrow.UINT16: + return NewUint16Builder(mem) + case arrow.INT16: + return NewInt16Builder(mem) + case arrow.UINT32: + return NewUint32Builder(mem) + case arrow.INT32: + return NewInt32Builder(mem) + case arrow.UINT64: + return NewUint64Builder(mem) + case arrow.INT64: + return NewInt64Builder(mem) + case arrow.FLOAT16: + return NewFloat16Builder(mem) + case arrow.FLOAT32: + return NewFloat32Builder(mem) + case arrow.FLOAT64: + return NewFloat64Builder(mem) + case arrow.STRING: + return NewStringBuilder(mem) + case arrow.BINARY: + return NewBinaryBuilder(mem, arrow.BinaryTypes.Binary) + case arrow.FIXED_SIZE_BINARY: + typ := dtype.(*arrow.FixedSizeBinaryType) + return NewFixedSizeBinaryBuilder(mem, typ) + case arrow.DATE32: + case arrow.DATE64: + case arrow.TIMESTAMP: + case arrow.TIME32: + typ := dtype.(*arrow.Time32Type) + return NewTime32Builder(mem, typ) + case arrow.TIME64: + typ := dtype.(*arrow.Time64Type) + return NewTime64Builder(mem, typ) + case arrow.INTERVAL: + case arrow.DECIMAL: + case arrow.LIST: + typ := dtype.(*arrow.ListType) + return NewListBuilder(mem, typ.Elem()) + case arrow.STRUCT: + typ := dtype.(*arrow.StructType) + return NewStructBuilder(mem, typ) + case arrow.UNION: + case arrow.DICTIONARY: + case arrow.MAP: + case arrow.EXTENSION: + case arrow.FIXED_SIZE_LIST: + typ := dtype.(*arrow.FixedSizeListType) + return NewFixedSizeListBuilder(mem, typ.Len(), typ.Elem()) + case arrow.DURATION: + } + panic(fmt.Errorf("arrow/array: unsupported builder for %T", dtype)) +} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/compare.go b/vendor/github.com/apache/arrow/go/arrow/array/compare.go new file mode 100644 index 000000000000..537630dbffc6 --- /dev/null +++ 
b/vendor/github.com/apache/arrow/go/arrow/array/compare.go @@ -0,0 +1,474 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "math" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/float16" + "golang.org/x/xerrors" +) + +// RecordEqual reports whether the two provided records are equal. +func RecordEqual(left, right Record) bool { + switch { + case left.NumCols() != right.NumCols(): + return false + case left.NumRows() != right.NumRows(): + return false + } + + for i := range left.Columns() { + lc := left.Column(i) + rc := right.Column(i) + if !ArrayEqual(lc, rc) { + return false + } + } + return true +} + +// RecordApproxEqual reports whether the two provided records are approximately equal. +// For non-floating point columns, it is equivalent to RecordEqual. +func RecordApproxEqual(left, right Record, opts ...EqualOption) bool { + switch { + case left.NumCols() != right.NumCols(): + return false + case left.NumRows() != right.NumRows(): + return false + } + + opt := newEqualOption(opts...) + + for i := range left.Columns() { + lc := left.Column(i) + rc := right.Column(i) + if !arrayApproxEqual(lc, rc, opt) { + return false + } + } + return true +} + +// ArrayEqual reports whether the two provided arrays are equal. +func ArrayEqual(left, right Interface) bool { + switch { + case !baseArrayEqual(left, right): + return false + case left.Len() == 0: + return true + case left.NullN() == left.Len(): + return true + } + + // at this point, we know both arrays have same type, same length, same number of nulls + // and nulls at the same place. + // compare the values. 
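+ // (Editorial example: two Int32 arrays both holding [1, null, 3] reach this
+ // point, while [1, null, 3] vs [1, 2, 3] already failed the NullN and
+ // validity-bitmap checks in baseArrayEqual and never do.)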
+ + switch l := left.(type) { + case *Null: + return true + case *Boolean: + r := right.(*Boolean) + return arrayEqualBoolean(l, r) + case *FixedSizeBinary: + r := right.(*FixedSizeBinary) + return arrayEqualFixedSizeBinary(l, r) + case *Binary: + r := right.(*Binary) + return arrayEqualBinary(l, r) + case *String: + r := right.(*String) + return arrayEqualString(l, r) + case *Int8: + r := right.(*Int8) + return arrayEqualInt8(l, r) + case *Int16: + r := right.(*Int16) + return arrayEqualInt16(l, r) + case *Int32: + r := right.(*Int32) + return arrayEqualInt32(l, r) + case *Int64: + r := right.(*Int64) + return arrayEqualInt64(l, r) + case *Uint8: + r := right.(*Uint8) + return arrayEqualUint8(l, r) + case *Uint16: + r := right.(*Uint16) + return arrayEqualUint16(l, r) + case *Uint32: + r := right.(*Uint32) + return arrayEqualUint32(l, r) + case *Uint64: + r := right.(*Uint64) + return arrayEqualUint64(l, r) + case *Float16: + r := right.(*Float16) + return arrayEqualFloat16(l, r) + case *Float32: + r := right.(*Float32) + return arrayEqualFloat32(l, r) + case *Float64: + r := right.(*Float64) + return arrayEqualFloat64(l, r) + case *Decimal128: + r := right.(*Decimal128) + return arrayEqualDecimal128(l, r) + case *Date32: + r := right.(*Date32) + return arrayEqualDate32(l, r) + case *Date64: + r := right.(*Date64) + return arrayEqualDate64(l, r) + case *Time32: + r := right.(*Time32) + return arrayEqualTime32(l, r) + case *Time64: + r := right.(*Time64) + return arrayEqualTime64(l, r) + case *Timestamp: + r := right.(*Timestamp) + return arrayEqualTimestamp(l, r) + case *List: + r := right.(*List) + return arrayEqualList(l, r) + case *FixedSizeList: + r := right.(*FixedSizeList) + return arrayEqualFixedSizeList(l, r) + case *Struct: + r := right.(*Struct) + return arrayEqualStruct(l, r) + case *MonthInterval: + r := right.(*MonthInterval) + return arrayEqualMonthInterval(l, r) + case *DayTimeInterval: + r := right.(*DayTimeInterval) + return arrayEqualDayTimeInterval(l, r) + case *Duration: + r := right.(*Duration) + return arrayEqualDuration(l, r) + + default: + panic(xerrors.Errorf("arrow/array: unknown array type %T", l)) + } +} + +// ArraySliceEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are equal. +func ArraySliceEqual(left Interface, lbeg, lend int64, right Interface, rbeg, rend int64) bool { + l := NewSlice(left, lbeg, lend) + defer l.Release() + r := NewSlice(right, rbeg, rend) + defer r.Release() + + return ArrayEqual(l, r) +} + +const defaultAbsoluteTolerance = 1e-5 + +type equalOption struct { + atol float64 // absolute tolerance + nansEq bool // whether NaNs are considered equal. 
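+
+ // (Editorial note: the resulting semantics, reachable through the public
+ // options defined below, are |v1-v2| <= atol for float values; e.g.
+ //	array.ArrayApproxEqual(l, r, array.WithAbsTolerance(1e-3), array.WithNaNsEqual(true))
+ // additionally treats NaN as equal to NaN.)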
+} + +func (eq equalOption) f16(f1, f2 float16.Num) bool { + v1 := float64(f1.Float32()) + v2 := float64(f2.Float32()) + switch { + case eq.nansEq: + return math.Abs(v1-v2) <= eq.atol || (math.IsNaN(v1) && math.IsNaN(v2)) + default: + return math.Abs(v1-v2) <= eq.atol + } +} + +func (eq equalOption) f32(f1, f2 float32) bool { + v1 := float64(f1) + v2 := float64(f2) + switch { + case eq.nansEq: + return math.Abs(v1-v2) <= eq.atol || (math.IsNaN(v1) && math.IsNaN(v2)) + default: + return math.Abs(v1-v2) <= eq.atol + } +} + +func (eq equalOption) f64(v1, v2 float64) bool { + switch { + case eq.nansEq: + return math.Abs(v1-v2) <= eq.atol || (math.IsNaN(v1) && math.IsNaN(v2)) + default: + return math.Abs(v1-v2) <= eq.atol + } +} + +func newEqualOption(opts ...EqualOption) equalOption { + eq := equalOption{ + atol: defaultAbsoluteTolerance, + nansEq: false, + } + for _, opt := range opts { + opt(&eq) + } + + return eq +} + +// EqualOption is a functional option type used to configure how Records and Arrays are compared. +type EqualOption func(*equalOption) + +// WithNaNsEqual configures the comparison functions so that NaNs are considered equal. +func WithNaNsEqual(v bool) EqualOption { + return func(o *equalOption) { + o.nansEq = v + } +} + +// WithAbsTolerance configures the comparison functions so that 2 floating point values +// v1 and v2 are considered equal if |v1-v2| <= atol. +func WithAbsTolerance(atol float64) EqualOption { + return func(o *equalOption) { + o.atol = atol + } +} + +// ArrayApproxEqual reports whether the two provided arrays are approximately equal. +// For non-floating point arrays, it is equivalent to ArrayEqual. +func ArrayApproxEqual(left, right Interface, opts ...EqualOption) bool { + opt := newEqualOption(opts...) + return arrayApproxEqual(left, right, opt) +} + +func arrayApproxEqual(left, right Interface, opt equalOption) bool { + switch { + case !baseArrayEqual(left, right): + return false + case left.Len() == 0: + return true + case left.NullN() == left.Len(): + return true + } + + // at this point, we know both arrays have same type, same length, same number of nulls + // and nulls at the same place. + // compare the values. 
+ + switch l := left.(type) { + case *Null: + return true + case *Boolean: + r := right.(*Boolean) + return arrayEqualBoolean(l, r) + case *FixedSizeBinary: + r := right.(*FixedSizeBinary) + return arrayEqualFixedSizeBinary(l, r) + case *Binary: + r := right.(*Binary) + return arrayEqualBinary(l, r) + case *String: + r := right.(*String) + return arrayEqualString(l, r) + case *Int8: + r := right.(*Int8) + return arrayEqualInt8(l, r) + case *Int16: + r := right.(*Int16) + return arrayEqualInt16(l, r) + case *Int32: + r := right.(*Int32) + return arrayEqualInt32(l, r) + case *Int64: + r := right.(*Int64) + return arrayEqualInt64(l, r) + case *Uint8: + r := right.(*Uint8) + return arrayEqualUint8(l, r) + case *Uint16: + r := right.(*Uint16) + return arrayEqualUint16(l, r) + case *Uint32: + r := right.(*Uint32) + return arrayEqualUint32(l, r) + case *Uint64: + r := right.(*Uint64) + return arrayEqualUint64(l, r) + case *Float16: + r := right.(*Float16) + return arrayApproxEqualFloat16(l, r, opt) + case *Float32: + r := right.(*Float32) + return arrayApproxEqualFloat32(l, r, opt) + case *Float64: + r := right.(*Float64) + return arrayApproxEqualFloat64(l, r, opt) + case *Decimal128: + r := right.(*Decimal128) + return arrayEqualDecimal128(l, r) + case *Date32: + r := right.(*Date32) + return arrayEqualDate32(l, r) + case *Date64: + r := right.(*Date64) + return arrayEqualDate64(l, r) + case *Time32: + r := right.(*Time32) + return arrayEqualTime32(l, r) + case *Time64: + r := right.(*Time64) + return arrayEqualTime64(l, r) + case *Timestamp: + r := right.(*Timestamp) + return arrayEqualTimestamp(l, r) + case *List: + r := right.(*List) + return arrayApproxEqualList(l, r, opt) + case *FixedSizeList: + r := right.(*FixedSizeList) + return arrayApproxEqualFixedSizeList(l, r, opt) + case *Struct: + r := right.(*Struct) + return arrayApproxEqualStruct(l, r, opt) + case *MonthInterval: + r := right.(*MonthInterval) + return arrayEqualMonthInterval(l, r) + case *DayTimeInterval: + r := right.(*DayTimeInterval) + return arrayEqualDayTimeInterval(l, r) + case *Duration: + r := right.(*Duration) + return arrayEqualDuration(l, r) + + default: + panic(xerrors.Errorf("arrow/array: unknown array type %T", l)) + } + + return false +} + +func baseArrayEqual(left, right Interface) bool { + switch { + case left.Len() != right.Len(): + return false + case left.NullN() != right.NullN(): + return false + case !arrow.TypeEqual(left.DataType(), right.DataType()): // We do not check for metadata as in the C++ implementation. + return false + case !validityBitmapEqual(left, right): + return false + } + return true +} + +func validityBitmapEqual(left, right Interface) bool { + // TODO(alexandreyc): make it faster by comparing byte slices of the validity bitmap? 
+ n := left.Len() + if n != right.Len() { + return false + } + for i := 0; i < n; i++ { + if left.IsNull(i) != right.IsNull(i) { + return false + } + } + return true +} + +func arrayApproxEqualFloat16(left, right *Float16, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !opt.f16(left.Value(i), right.Value(i)) { + return false + } + } + return true +} + +func arrayApproxEqualFloat32(left, right *Float32, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !opt.f32(left.Value(i), right.Value(i)) { + return false + } + } + return true +} + +func arrayApproxEqualFloat64(left, right *Float64, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !opt.f64(left.Value(i), right.Value(i)) { + return false + } + } + return true +} + +func arrayApproxEqualList(left, right *List, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return arrayApproxEqual(l, r, opt) + }() + if !o { + return false + } + } + return true +} + +func arrayApproxEqualFixedSizeList(left, right *FixedSizeList, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return arrayApproxEqual(l, r, opt) + }() + if !o { + return false + } + } + return true +} + +func arrayApproxEqualStruct(left, right *Struct, opt equalOption) bool { + for i, lf := range left.fields { + rf := right.fields[i] + if !arrayApproxEqual(lf, rf, opt) { + return false + } + } + return true +} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/data.go b/vendor/github.com/apache/arrow/go/arrow/array/data.go new file mode 100644 index 000000000000..264896159baa --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/data.go @@ -0,0 +1,179 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// Data represents the memory and metadata of an Arrow array. +type Data struct { + refCount int64 + dtype arrow.DataType + nulls int + offset int + length int + buffers []*memory.Buffer // TODO(sgc): should this be an interface? + childData []*Data // TODO(sgc): managed by ListArray, StructArray and UnionArray types +} + +// NewData creates a new Data. 
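+//
+// An editorial usage sketch (the buffer variables are ours, not upstream's):
+// an int32 array of length 3 with no nulls wraps a validity bitmap and a
+// values buffer, and retains a reference to both:
+//
+//	d := array.NewData(arrow.PrimitiveTypes.Int32, 3,
+//		[]*memory.Buffer{nullBitmap, values}, nil, 0, 0)
+//	defer d.Release()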
+func NewData(dtype arrow.DataType, length int, buffers []*memory.Buffer, childData []*Data, nulls, offset int) *Data { + for _, b := range buffers { + if b != nil { + b.Retain() + } + } + + for _, child := range childData { + if child != nil { + child.Retain() + } + } + + return &Data{ + refCount: 1, + dtype: dtype, + nulls: nulls, + length: length, + offset: offset, + buffers: buffers, + childData: childData, + } +} + +// Reset sets the Data for re-use. +func (d *Data) Reset(dtype arrow.DataType, length int, buffers []*memory.Buffer, childData []*Data, nulls, offset int) { + // Retain new buffers before releasing existing buffers in-case they're the same ones to prevent accidental premature + // release. + for _, b := range buffers { + if b != nil { + b.Retain() + } + } + for _, b := range d.buffers { + if b != nil { + b.Release() + } + } + d.buffers = buffers + + // Retain new children data before releasing existing children data in-case they're the same ones to prevent accidental + // premature release. + for _, d := range childData { + if d != nil { + d.Retain() + } + } + for _, d := range d.childData { + if d != nil { + d.Release() + } + } + d.childData = childData + + d.dtype = dtype + d.length = length + d.nulls = nulls + d.offset = offset +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (d *Data) Retain() { + atomic.AddInt64(&d.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (d *Data) Release() { + debug.Assert(atomic.LoadInt64(&d.refCount) > 0, "too many releases") + + if atomic.AddInt64(&d.refCount, -1) == 0 { + for _, b := range d.buffers { + if b != nil { + b.Release() + } + } + + for _, b := range d.childData { + b.Release() + } + d.buffers, d.childData = nil, nil + } +} + +// DataType returns the DataType of the data. +func (d *Data) DataType() arrow.DataType { return d.dtype } + +// NullN returns the number of nulls. +func (d *Data) NullN() int { return d.nulls } + +// Len returns the length. +func (d *Data) Len() int { return d.length } + +// Offset returns the offset. +func (d *Data) Offset() int { return d.offset } + +// Buffers returns the buffers. +func (d *Data) Buffers() []*memory.Buffer { return d.buffers } + +// NewSliceData returns a new slice that shares backing data with the input. +// The returned Data slice starts at i and extends j-i elements, such as: +// slice := data[i:j] +// The returned value must be Release'd after use. +// +// NewSliceData panics if the slice is outside the valid range of the input Data. +// NewSliceData panics if j < i. 
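+//
+// For example (editorial sketch):
+//
+//	slice := array.NewSliceData(d, 1, 3) // shares d's buffers; offset 1, length 2
+//	defer slice.Release()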
+func NewSliceData(data *Data, i, j int64) *Data { + if j > int64(data.length) || i > j || data.offset+int(i) > data.offset+data.length { + panic("arrow/array: index out of range") + } + + for _, b := range data.buffers { + if b != nil { + b.Retain() + } + } + + for _, child := range data.childData { + if child != nil { + child.Retain() + } + } + + o := &Data{ + refCount: 1, + dtype: data.dtype, + nulls: UnknownNullCount, + length: int(j - i), + offset: data.offset + int(i), + buffers: data.buffers, + childData: data.childData, + } + + if data.nulls == 0 { + o.nulls = 0 + } + + return o +} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/decimal128.go b/vendor/github.com/apache/arrow/go/arrow/array/decimal128.go new file mode 100644 index 000000000000..3acf6b9c444d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/decimal128.go @@ -0,0 +1,235 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array // import "github.com/apache/arrow/go/arrow/array" + +import ( + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/decimal128" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// A type which represents an immutable sequence of 128-bit decimal values. 
+type Decimal128 struct { + array + + values []decimal128.Num +} + +func NewDecimal128Data(data *Data) *Decimal128 { + a := &Decimal128{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *Decimal128) Value(i int) decimal128.Num { return a.values[i] } + +func (a *Decimal128) Values() []decimal128.Num { return a.values } + +func (a *Decimal128) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Decimal128) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Decimal128Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualDecimal128(left, right *Decimal128) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type Decimal128Builder struct { + builder + + dtype *arrow.Decimal128Type + data *memory.Buffer + rawData []decimal128.Num +} + +func NewDecimal128Builder(mem memory.Allocator, dtype *arrow.Decimal128Type) *Decimal128Builder { + return &Decimal128Builder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Decimal128Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Decimal128Builder) Append(v decimal128.Num) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Decimal128Builder) UnsafeAppend(v decimal128.Num) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Decimal128Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Decimal128Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Decimal128Builder) AppendValues(v []decimal128.Num, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + if len(v) > 0 { + arrow.Decimal128Traits.Copy(b.rawData[b.length:], v) + } + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Decimal128Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Decimal128Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Decimal128Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. 
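+//
+// For example (editorial sketch; decimal128.FromI64 is the upstream helper
+// that builds a Num from an int64):
+//
+//	b.Reserve(2)
+//	b.UnsafeAppend(decimal128.FromI64(1))
+//	b.UnsafeAppend(decimal128.FromI64(2))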
+func (b *Decimal128Builder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *Decimal128Builder) Resize(n int) {
+ nBuilder := n
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(nBuilder, b.init)
+ b.data.Resize(arrow.Decimal128Traits.BytesRequired(n))
+ b.rawData = arrow.Decimal128Traits.CastFromBytes(b.data.Bytes())
+ }
+}
+
+// NewArray creates a Decimal128 array from the memory buffers used by the builder and resets the Decimal128Builder
+// so it can be used to build a new array.
+func (b *Decimal128Builder) NewArray() Interface {
+ return b.NewDecimal128Array()
+}
+
+// NewDecimal128Array creates a Decimal128 array from the memory buffers used by the builder and resets the Decimal128Builder
+// so it can be used to build a new array.
+func (b *Decimal128Builder) NewDecimal128Array() (a *Decimal128) {
+ data := b.newData()
+ a = NewDecimal128Data(data)
+ data.Release()
+ return
+}
+
+func (b *Decimal128Builder) newData() (data *Data) {
+ bytesRequired := arrow.Decimal128Traits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+ data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+ b.reset()
+
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return
+}
+
+var (
+ _ Interface = (*Decimal128)(nil)
+ _ Builder = (*Decimal128Builder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/arrow/array/doc.go b/vendor/github.com/apache/arrow/go/arrow/array/doc.go
new file mode 100644
index 000000000000..5cf85408626a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/array/doc.go
@@ -0,0 +1,20 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package array provides implementations of various Arrow array types.
+*/
+package array
diff --git a/vendor/github.com/apache/arrow/go/arrow/array/fixed_size_list.go b/vendor/github.com/apache/arrow/go/arrow/array/fixed_size_list.go
new file mode 100644
index 000000000000..b0a2ce75b978
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/array/fixed_size_list.go
@@ -0,0 +1,240 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// FixedSizeList represents an immutable sequence of N array values. +type FixedSizeList struct { + array + n int32 + values Interface +} + +// NewFixedSizeListData returns a new List array value, from data. +func NewFixedSizeListData(data *Data) *FixedSizeList { + a := &FixedSizeList{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *FixedSizeList) ListValues() Interface { return a.values } + +func (a *FixedSizeList) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + if !a.IsValid(i) { + o.WriteString("(null)") + continue + } + sub := a.newListValue(i) + fmt.Fprintf(o, "%v", sub) + sub.Release() + } + o.WriteString("]") + return o.String() +} + +func (a *FixedSizeList) newListValue(i int) Interface { + n := int64(a.n) + off := int64(a.array.data.offset) + beg := (off + int64(i)) * n + end := (off + int64(i+1)) * n + sli := NewSlice(a.values, beg, end) + return sli +} + +func (a *FixedSizeList) setData(data *Data) { + a.array.setData(data) + a.n = a.DataType().(*arrow.FixedSizeListType).Len() + a.values = MakeFromData(data.childData[0]) +} + +func arrayEqualFixedSizeList(left, right *FixedSizeList) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return ArrayEqual(l, r) + }() + if !o { + return false + } + } + return true +} + +// Len returns the number of elements in the array. +func (a *FixedSizeList) Len() int { return a.array.Len() } + +func (a *FixedSizeList) Retain() { + a.array.Retain() + a.values.Retain() +} + +func (a *FixedSizeList) Release() { + a.array.Release() + a.values.Release() +} + +type FixedSizeListBuilder struct { + builder + + etype arrow.DataType // data type of the list's elements. + n int32 // number of elements in the fixed-size list. + values Builder // value builder for the list's elements. +} + +// NewFixedSizeListBuilder returns a builder, using the provided memory allocator. +// The created list builder will create a list whose elements will be of type etype. +func NewFixedSizeListBuilder(mem memory.Allocator, n int32, etype arrow.DataType) *FixedSizeListBuilder { + return &FixedSizeListBuilder{ + builder: builder{refCount: 1, mem: mem}, + etype: etype, + n: n, + values: newBuilder(mem, etype), + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
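+//
+// An editorial sketch of the builder's overall lifecycle (names are ours):
+//
+//	b := array.NewFixedSizeListBuilder(mem, 3, arrow.PrimitiveTypes.Int64)
+//	defer b.Release()
+//	vb := b.ValueBuilder().(*array.Int64Builder)
+//	b.Append(true)
+//	vb.AppendValues([]int64{1, 2, 3}, nil)
+//	arr := b.NewListArray()
+//	defer arr.Release()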
+func (b *FixedSizeListBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.values != nil { + b.values.Release() + b.values = nil + } + } +} + +func (b *FixedSizeListBuilder) Append(v bool) { + b.Reserve(1) + b.unsafeAppendBoolToBitmap(v) +} + +func (b *FixedSizeListBuilder) AppendNull() { + b.Reserve(1) + b.unsafeAppendBoolToBitmap(false) +} + +func (b *FixedSizeListBuilder) AppendValues(valid []bool) { + b.Reserve(len(valid)) + b.builder.unsafeAppendBoolsToBitmap(valid, len(valid)) +} + +func (b *FixedSizeListBuilder) unsafeAppend(v bool) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.length++ +} + +func (b *FixedSizeListBuilder) unsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +func (b *FixedSizeListBuilder) init(capacity int) { + b.builder.init(capacity) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *FixedSizeListBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *FixedSizeListBuilder) Resize(n int) { + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(n, b.builder.init) + } +} + +func (b *FixedSizeListBuilder) ValueBuilder() Builder { + return b.values +} + +// NewArray creates a List array from the memory buffers used by the builder and resets the FixedSizeListBuilder +// so it can be used to build a new array. +func (b *FixedSizeListBuilder) NewArray() Interface { + return b.NewListArray() +} + +// NewListArray creates a List array from the memory buffers used by the builder and resets the FixedSizeListBuilder +// so it can be used to build a new array. +func (b *FixedSizeListBuilder) NewListArray() (a *FixedSizeList) { + data := b.newData() + a = NewFixedSizeListData(data) + data.Release() + return +} + +func (b *FixedSizeListBuilder) newData() (data *Data) { + values := b.values.NewArray() + defer values.Release() + + data = NewData( + arrow.FixedSizeListOf(b.n, b.etype), b.length, + []*memory.Buffer{b.nullBitmap}, + []*Data{values.Data()}, + b.nulls, + 0, + ) + b.reset() + + return +} + +var ( + _ Interface = (*FixedSizeList)(nil) + _ Builder = (*FixedSizeListBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/fixedsize_binary.go b/vendor/github.com/apache/arrow/go/arrow/array/fixedsize_binary.go new file mode 100644 index 000000000000..502fb99e5112 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/fixedsize_binary.go @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "strings" + + "github.com/apache/arrow/go/arrow" +) + +// A type which represents an immutable sequence of fixed-length binary strings. +type FixedSizeBinary struct { + array + + valueBytes []byte + bytewidth int32 +} + +// NewFixedSizeBinaryData constructs a new fixed-size binary array from data. +func NewFixedSizeBinaryData(data *Data) *FixedSizeBinary { + a := &FixedSizeBinary{bytewidth: int32(data.DataType().(arrow.FixedWidthDataType).BitWidth() / 8)} + a.refCount = 1 + a.setData(data) + return a +} + +// Value returns the fixed-size slice at index i. This value should not be mutated. +func (a *FixedSizeBinary) Value(i int) []byte { + i += a.array.data.offset + var ( + bw = int(a.bytewidth) + beg = i * bw + end = (i + 1) * bw + ) + return a.valueBytes[beg:end] +} + +func (a *FixedSizeBinary) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%q", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *FixedSizeBinary) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.valueBytes = vals.Bytes() + } + +} + +func arrayEqualFixedSizeBinary(left, right *FixedSizeBinary) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if bytes.Compare(left.Value(i), right.Value(i)) != 0 { + return false + } + } + return true +} + +var ( + _ Interface = (*FixedSizeBinary)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/fixedsize_binarybuilder.go b/vendor/github.com/apache/arrow/go/arrow/array/fixedsize_binarybuilder.go new file mode 100644 index 000000000000..8a2f65f59c63 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/fixedsize_binarybuilder.go @@ -0,0 +1,154 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// A FixedSizeBinaryBuilder is used to build a FixedSizeBinary array using the Append methods. 
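+//
+// An editorial usage sketch, assuming a 4-byte-wide type (names are ours):
+//
+//	dt := &arrow.FixedSizeBinaryType{ByteWidth: 4}
+//	b := array.NewFixedSizeBinaryBuilder(mem, dt)
+//	defer b.Release()
+//	b.Append([]byte{0xde, 0xad, 0xbe, 0xef})
+//	arr := b.NewFixedSizeBinaryArray()
+//	defer arr.Release()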
+type FixedSizeBinaryBuilder struct { + builder + + dtype *arrow.FixedSizeBinaryType + values *byteBufferBuilder +} + +func NewFixedSizeBinaryBuilder(mem memory.Allocator, dtype *arrow.FixedSizeBinaryType) *FixedSizeBinaryBuilder { + b := &FixedSizeBinaryBuilder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + values: newByteBufferBuilder(mem), + } + return b +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (b *FixedSizeBinaryBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.values != nil { + b.values.Release() + b.values = nil + } + } +} + +func (b *FixedSizeBinaryBuilder) Append(v []byte) { + if len(v) != b.dtype.ByteWidth { + // TODO(alexandre): should we return an error instead? + panic("len(v) != b.dtype.ByteWidth") + } + + b.Reserve(1) + b.values.Append(v) + b.UnsafeAppendBoolToBitmap(true) +} + +func (b *FixedSizeBinaryBuilder) AppendNull() { + b.Reserve(1) + b.values.Advance(b.dtype.ByteWidth) + b.UnsafeAppendBoolToBitmap(false) +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *FixedSizeBinaryBuilder) AppendValues(v [][]byte, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + for _, vv := range v { + switch len(vv) { + case 0: + b.values.Advance(b.dtype.ByteWidth) + case b.dtype.ByteWidth: + b.values.Append(vv) + default: + panic(fmt.Errorf("array: invalid binary length (got=%d, want=%d)", len(vv), b.dtype.ByteWidth)) + } + } + + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *FixedSizeBinaryBuilder) init(capacity int) { + b.builder.init(capacity) + b.values.resize(capacity * b.dtype.ByteWidth) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *FixedSizeBinaryBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *FixedSizeBinaryBuilder) Resize(n int) { + b.builder.resize(n, b.init) +} + +// NewArray creates a FixedSizeBinary array from the memory buffers used by the +// builder and resets the FixedSizeBinaryBuilder so it can be used to build a new array. +func (b *FixedSizeBinaryBuilder) NewArray() Interface { + return b.NewFixedSizeBinaryArray() +} + +// NewFixedSizeBinaryArray creates a FixedSizeBinary array from the memory buffers used by the builder and resets the FixedSizeBinaryBuilder +// so it can be used to build a new array. 
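+// After the call the builder is empty, so it can immediately accumulate a
+// second, independent array (editorial sketch, assuming ByteWidth 4):
+//
+//	first := b.NewFixedSizeBinaryArray()
+//	b.Append([]byte{1, 2, 3, 4}) // begins the next array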
+func (b *FixedSizeBinaryBuilder) NewFixedSizeBinaryArray() (a *FixedSizeBinary) { + data := b.newData() + a = NewFixedSizeBinaryData(data) + data.Release() + return +} + +func (b *FixedSizeBinaryBuilder) newData() (data *Data) { + values := b.values.Finish() + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, values}, nil, b.nulls, 0) + + if values != nil { + values.Release() + } + + b.builder.reset() + + return +} + +var ( + _ Builder = (*FixedSizeBinaryBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/float16.go b/vendor/github.com/apache/arrow/go/arrow/array/float16.go new file mode 100644 index 000000000000..931e2d96c076 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/float16.go @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/float16" +) + +// A type which represents an immutable sequence of Float16 values. +type Float16 struct { + array + values []float16.Num +} + +func NewFloat16Data(data *Data) *Float16 { + a := &Float16{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *Float16) Value(i int) float16.Num { return a.values[i] } + +func (a *Float16) Values() []float16.Num { return a.values } + +func (a *Float16) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", a.values[i].Float32()) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Float16) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Float16Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualFloat16(left, right *Float16) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +var ( + _ Interface = (*Float16)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/float16_builder.go b/vendor/github.com/apache/arrow/go/arrow/array/float16_builder.go new file mode 100644 index 000000000000..80864279a061 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/float16_builder.go @@ -0,0 +1,165 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/float16" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +type Float16Builder struct { + builder + + data *memory.Buffer + rawData []float16.Num +} + +func NewFloat16Builder(mem memory.Allocator) *Float16Builder { + return &Float16Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Float16Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Float16Builder) Append(v float16.Num) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Float16Builder) UnsafeAppend(v float16.Num) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Float16Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Float16Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Float16Builder) AppendValues(v []float16.Num, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + if len(v) > 0 { + arrow.Float16Traits.Copy(b.rawData[b.length:], v) + } + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Float16Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint16Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Float16Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Float16Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. 
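+//
+// For example (editorial sketch):
+//
+//	b.Resize(64) // room for at least 64 float16 values
+//	b.Resize(8)  // may shrink the backing buffer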
+func (b *Float16Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Float16Traits.BytesRequired(n)) + b.rawData = arrow.Float16Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Float16 array from the memory buffers used by the builder and resets the Float16Builder +// so it can be used to build a new array. +func (b *Float16Builder) NewArray() Interface { + return b.NewFloat16Array() +} + +// NewFloat16Array creates a Float16 array from the memory buffers used by the builder and resets the Float16Builder +// so it can be used to build a new array. +func (b *Float16Builder) NewFloat16Array() (a *Float16) { + data := b.newData() + a = NewFloat16Data(data) + data.Release() + return +} + +func (b *Float16Builder) newData() (data *Data) { + bytesRequired := arrow.Float16Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.FixedWidthTypes.Float16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/interval.go b/vendor/github.com/apache/arrow/go/arrow/array/interval.go new file mode 100644 index 000000000000..d3135587c279 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/interval.go @@ -0,0 +1,434 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array // import "github.com/apache/arrow/go/arrow/array" + +import ( + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" + "golang.org/x/xerrors" +) + +func NewIntervalData(data *Data) Interface { + switch data.dtype.(type) { + case *arrow.MonthIntervalType: + return NewMonthIntervalData(data) + case *arrow.DayTimeIntervalType: + return NewDayTimeIntervalData(data) + default: + panic(xerrors.Errorf("arrow/array: unknown interval data type %T", data.dtype)) + } +} + +// A type which represents an immutable sequence of arrow.MonthInterval values. 
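+//
+// An editorial usage sketch of building one (names are ours):
+//
+//	b := array.NewMonthIntervalBuilder(mem)
+//	defer b.Release()
+//	b.Append(arrow.MonthInterval(12))
+//	b.AppendNull()
+//	arr := b.NewMonthIntervalArray()
+//	defer arr.Release()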
+type MonthInterval struct { + array + values []arrow.MonthInterval +} + +func NewMonthIntervalData(data *Data) *MonthInterval { + a := &MonthInterval{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *MonthInterval) Value(i int) arrow.MonthInterval { return a.values[i] } +func (a *MonthInterval) MonthIntervalValues() []arrow.MonthInterval { return a.values } + +func (a *MonthInterval) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *MonthInterval) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.MonthIntervalTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualMonthInterval(left, right *MonthInterval) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type MonthIntervalBuilder struct { + builder + + data *memory.Buffer + rawData []arrow.MonthInterval +} + +func NewMonthIntervalBuilder(mem memory.Allocator) *MonthIntervalBuilder { + return &MonthIntervalBuilder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *MonthIntervalBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *MonthIntervalBuilder) Append(v arrow.MonthInterval) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *MonthIntervalBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *MonthIntervalBuilder) UnsafeAppend(v arrow.MonthInterval) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *MonthIntervalBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *MonthIntervalBuilder) AppendValues(v []arrow.MonthInterval, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.MonthIntervalTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *MonthIntervalBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.MonthIntervalTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.MonthIntervalTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. 
+func (b *MonthIntervalBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *MonthIntervalBuilder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.MonthIntervalTraits.BytesRequired(n)) + b.rawData = arrow.MonthIntervalTraits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a MonthInterval array from the memory buffers used by the builder and resets the MonthIntervalBuilder +// so it can be used to build a new array. +func (b *MonthIntervalBuilder) NewArray() Interface { + return b.NewMonthIntervalArray() +} + +// NewMonthIntervalArray creates a MonthInterval array from the memory buffers used by the builder and resets the MonthIntervalBuilder +// so it can be used to build a new array. +func (b *MonthIntervalBuilder) NewMonthIntervalArray() (a *MonthInterval) { + data := b.newData() + a = NewMonthIntervalData(data) + data.Release() + return +} + +func (b *MonthIntervalBuilder) newData() (data *Data) { + bytesRequired := arrow.MonthIntervalTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.FixedWidthTypes.MonthInterval, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +// A type which represents an immutable sequence of arrow.DayTimeInterval values. +type DayTimeInterval struct { + array + values []arrow.DayTimeInterval +} + +func NewDayTimeIntervalData(data *Data) *DayTimeInterval { + a := &DayTimeInterval{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *DayTimeInterval) Value(i int) arrow.DayTimeInterval { return a.values[i] } +func (a *DayTimeInterval) DayTimeIntervalValues() []arrow.DayTimeInterval { return a.values } + +func (a *DayTimeInterval) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *DayTimeInterval) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.DayTimeIntervalTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualDayTimeInterval(left, right *DayTimeInterval) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type DayTimeIntervalBuilder struct { + builder + + data *memory.Buffer + rawData []arrow.DayTimeInterval +} + +func NewDayTimeIntervalBuilder(mem memory.Allocator) *DayTimeIntervalBuilder { + return &DayTimeIntervalBuilder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *DayTimeIntervalBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *DayTimeIntervalBuilder) Append(v arrow.DayTimeInterval) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *DayTimeIntervalBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *DayTimeIntervalBuilder) UnsafeAppend(v arrow.DayTimeInterval) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *DayTimeIntervalBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *DayTimeIntervalBuilder) AppendValues(v []arrow.DayTimeInterval, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.DayTimeIntervalTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *DayTimeIntervalBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.DayTimeIntervalTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.DayTimeIntervalTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *DayTimeIntervalBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *DayTimeIntervalBuilder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.DayTimeIntervalTraits.BytesRequired(n)) + b.rawData = arrow.DayTimeIntervalTraits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a DayTimeInterval array from the memory buffers used by the builder and resets the DayTimeIntervalBuilder +// so it can be used to build a new array. +func (b *DayTimeIntervalBuilder) NewArray() Interface { + return b.NewDayTimeIntervalArray() +} + +// NewDayTimeIntervalArray creates a DayTimeInterval array from the memory buffers used by the builder and resets the DayTimeIntervalBuilder +// so it can be used to build a new array. 
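
Both interval builders expose the same surface; ahead of the remaining DayTimeInterval constructor below, a combined usage sketch (illustrative only; it assumes arrow.DayTimeInterval is the Days/Milliseconds struct defined in the vendored arrow package):

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	pool := memory.NewGoAllocator()

	mb := array.NewMonthIntervalBuilder(pool)
	defer mb.Release()
	mb.AppendValues([]arrow.MonthInterval{1, 12, 24}, nil) // empty valid slice: all values valid
	months := mb.NewMonthIntervalArray()
	defer months.Release()

	db := array.NewDayTimeIntervalBuilder(pool)
	defer db.Release()
	db.Append(arrow.DayTimeInterval{Days: 1, Milliseconds: 500})
	db.AppendNull()
	daytimes := db.NewDayTimeIntervalArray()
	defer daytimes.Release()

	fmt.Println(months.Len(), daytimes.NullN()) // 3 1
}
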
+func (b *DayTimeIntervalBuilder) NewDayTimeIntervalArray() (a *DayTimeInterval) { + data := b.newData() + a = NewDayTimeIntervalData(data) + data.Release() + return +} + +func (b *DayTimeIntervalBuilder) newData() (data *Data) { + bytesRequired := arrow.DayTimeIntervalTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.FixedWidthTypes.DayTimeInterval, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +var ( + _ Interface = (*MonthInterval)(nil) + _ Interface = (*DayTimeInterval)(nil) + + _ Builder = (*MonthIntervalBuilder)(nil) + _ Builder = (*DayTimeIntervalBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/list.go b/vendor/github.com/apache/arrow/go/arrow/array/list.go new file mode 100644 index 000000000000..976f9acea20d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/list.go @@ -0,0 +1,269 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// List represents an immutable sequence of array values. +type List struct { + array + values Interface + offsets []int32 +} + +// NewListData returns a new List array value, from data. 
+func NewListData(data *Data) *List { + a := &List{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *List) ListValues() Interface { return a.values } + +func (a *List) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + if !a.IsValid(i) { + o.WriteString("(null)") + continue + } + sub := a.newListValue(i) + fmt.Fprintf(o, "%v", sub) + sub.Release() + } + o.WriteString("]") + return o.String() +} + +func (a *List) newListValue(i int) Interface { + j := i + a.array.data.offset + beg := int64(a.offsets[j]) + end := int64(a.offsets[j+1]) + return NewSlice(a.values, beg, end) +} + +func (a *List) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.offsets = arrow.Int32Traits.CastFromBytes(vals.Bytes()) + } + a.values = MakeFromData(data.childData[0]) +} + +func arrayEqualList(left, right *List) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return ArrayEqual(l, r) + }() + if !o { + return false + } + } + return true +} + +// Len returns the number of elements in the array. +func (a *List) Len() int { return a.array.Len() } + +func (a *List) Offsets() []int32 { return a.offsets } + +func (a *List) Retain() { + a.array.Retain() + a.values.Retain() +} + +func (a *List) Release() { + a.array.Release() + a.values.Release() +} + +type ListBuilder struct { + builder + + etype arrow.DataType // data type of the list's elements. + values Builder // value builder for the list's elements. + offsets *Int32Builder +} + +// NewListBuilder returns a builder, using the provided memory allocator. +// The created list builder will create a list whose elements will be of type etype. +func NewListBuilder(mem memory.Allocator, etype arrow.DataType) *ListBuilder { + return &ListBuilder{ + builder: builder{refCount: 1, mem: mem}, + etype: etype, + values: newBuilder(mem, etype), + offsets: NewInt32Builder(mem), + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
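
Unlike the primitive builders, ListBuilder coordinates an offsets builder with a child value builder: Append(true) opens a new valid list element, and that element's values go through ValueBuilder. Before the remaining ListBuilder methods, a sketch of that flow (not part of the diff):

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	lb := array.NewListBuilder(memory.NewGoAllocator(), arrow.PrimitiveTypes.Int64)
	defer lb.Release()
	vb := lb.ValueBuilder().(*array.Int64Builder) // child builder for the list elements

	lb.Append(true) // open a new, valid list
	vb.AppendValues([]int64{1, 2, 3}, nil)
	lb.AppendNull() // a null list; no child values are appended

	lists := lb.NewListArray()
	defer lists.Release()
	fmt.Println(lists) // [[1 2 3] (null)]
}
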
+func (b *ListBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + } + + b.values.Release() + b.offsets.Release() +} + +func (b *ListBuilder) appendNextOffset() { + b.offsets.Append(int32(b.values.Len())) +} + +func (b *ListBuilder) Append(v bool) { + b.Reserve(1) + b.unsafeAppendBoolToBitmap(v) + b.appendNextOffset() +} + +func (b *ListBuilder) AppendNull() { + b.Reserve(1) + b.unsafeAppendBoolToBitmap(false) + b.appendNextOffset() +} + +func (b *ListBuilder) AppendValues(offsets []int32, valid []bool) { + b.Reserve(len(valid)) + b.offsets.AppendValues(offsets, nil) + b.builder.unsafeAppendBoolsToBitmap(valid, len(valid)) +} + +func (b *ListBuilder) unsafeAppend(v bool) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.length++ +} + +func (b *ListBuilder) unsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +func (b *ListBuilder) init(capacity int) { + b.builder.init(capacity) + b.offsets.init(capacity + 1) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *ListBuilder) Reserve(n int) { + b.builder.reserve(n, b.resizeHelper) + b.offsets.Reserve(n) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *ListBuilder) Resize(n int) { + b.resizeHelper(n) + b.offsets.Resize(n) +} + +func (b *ListBuilder) resizeHelper(n int) { + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(n, b.builder.init) + } +} + +func (b *ListBuilder) ValueBuilder() Builder { + return b.values +} + +// NewArray creates a List array from the memory buffers used by the builder and resets the ListBuilder +// so it can be used to build a new array. +func (b *ListBuilder) NewArray() Interface { + return b.NewListArray() +} + +// NewListArray creates a List array from the memory buffers used by the builder and resets the ListBuilder +// so it can be used to build a new array. +func (b *ListBuilder) NewListArray() (a *List) { + if b.offsets.Len() != b.length+1 { + b.appendNextOffset() + } + data := b.newData() + a = NewListData(data) + data.Release() + return +} + +func (b *ListBuilder) newData() (data *Data) { + values := b.values.NewArray() + defer values.Release() + + var offsets *memory.Buffer + if b.offsets != nil { + arr := b.offsets.NewInt32Array() + defer arr.Release() + offsets = arr.Data().buffers[1] + } + + data = NewData( + arrow.ListOf(b.etype), b.length, + []*memory.Buffer{ + b.nullBitmap, + offsets, + }, + []*Data{values.Data()}, + b.nulls, + 0, + ) + b.reset() + + return +} + +var ( + _ Interface = (*List)(nil) + _ Builder = (*ListBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/null.go b/vendor/github.com/apache/arrow/go/arrow/array/null.go new file mode 100644 index 000000000000..56d7fa45fa18 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/null.go @@ -0,0 +1,140 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// Null represents an immutable, degenerate array with no physical storage. +type Null struct { + array +} + +// NewNull returns a new Null array value of size n. +func NewNull(n int) *Null { + a := &Null{} + a.refCount = 1 + data := NewData( + arrow.Null, n, + []*memory.Buffer{nil}, + nil, + n, + 0, + ) + a.setData(data) + data.Release() + return a +} + +// NewNullData returns a new Null array value, from data. +func NewNullData(data *Data) *Null { + a := &Null{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *Null) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + o.WriteString("(null)") + } + o.WriteString("]") + return o.String() +} + +func (a *Null) setData(data *Data) { + a.array.setData(data) + a.array.nullBitmapBytes = nil + a.array.data.nulls = a.array.data.length +} + +type NullBuilder struct { + builder +} + +// NewNullBuilder returns a builder, using the provided memory allocator. +func NewNullBuilder(mem memory.Allocator) *NullBuilder { + return &NullBuilder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *NullBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + } +} + +func (b *NullBuilder) AppendNull() { + b.builder.length++ + b.builder.nulls++ +} + +func (*NullBuilder) Reserve(size int) {} +func (*NullBuilder) Resize(size int) {} + +func (*NullBuilder) init(cap int) {} +func (*NullBuilder) resize(newBits int, init func(int)) {} + +// NewArray creates a Null array from the memory buffers used by the builder and resets the NullBuilder +// so it can be used to build a new array. +func (b *NullBuilder) NewArray() Interface { + return b.NewNullArray() +} + +// NewNullArray creates a Null array from the memory buffers used by the builder and resets the NullBuilder +// so it can be used to build a new array. 
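
Since a Null array carries no physical storage, its builder only counts appended nulls; the whole cycle, ahead of the final constructor below, looks like this (sketch):

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	nb := array.NewNullBuilder(memory.NewGoAllocator())
	defer nb.Release()

	nb.AppendNull()
	nb.AppendNull()

	arr := nb.NewNullArray()
	defer arr.Release()
	fmt.Println(arr.Len(), arr.NullN()) // 2 2
}
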
+func (b *NullBuilder) NewNullArray() (a *Null) { + data := b.newData() + a = NewNullData(data) + data.Release() + return +} + +func (b *NullBuilder) newData() (data *Data) { + data = NewData( + arrow.Null, b.length, + []*memory.Buffer{nil}, + nil, + b.nulls, + 0, + ) + b.reset() + + return +} + +var ( + _ Interface = (*Null)(nil) + _ Builder = (*NullBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/numeric.gen.go b/vendor/github.com/apache/arrow/go/arrow/array/numeric.gen.go new file mode 100644 index 000000000000..068ab1ec98e7 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/numeric.gen.go @@ -0,0 +1,1098 @@ +// Code generated by array/numeric.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + + "github.com/apache/arrow/go/arrow" +) + +// A type which represents an immutable sequence of int64 values. +type Int64 struct { + array + values []int64 +} + +// NewInt64Data creates a new Int64. +func NewInt64Data(data *Data) *Int64 { + a := &Int64{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Int64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Int64) Value(i int) int64 { return a.values[i] } + +// Values returns the values. +func (a *Int64) Int64Values() []int64 { return a.values } + +// String returns a string representation of the array. +func (a *Int64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Int64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Int64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualInt64(left, right *Int64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of uint64 values. +type Uint64 struct { + array + values []uint64 +} + +// NewUint64Data creates a new Uint64. +func NewUint64Data(data *Data) *Uint64 { + a := &Uint64{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Uint64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Uint64) Value(i int) uint64 { return a.values[i] } + +// Values returns the values. 
+func (a *Uint64) Uint64Values() []uint64 { return a.values } + +// String returns a string representation of the array. +func (a *Uint64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Uint64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Uint64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualUint64(left, right *Uint64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of float64 values. +type Float64 struct { + array + values []float64 +} + +// NewFloat64Data creates a new Float64. +func NewFloat64Data(data *Data) *Float64 { + a := &Float64{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Float64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Float64) Value(i int) float64 { return a.values[i] } + +// Values returns the values. +func (a *Float64) Float64Values() []float64 { return a.values } + +// String returns a string representation of the array. +func (a *Float64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Float64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Float64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualFloat64(left, right *Float64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of int32 values. +type Int32 struct { + array + values []int32 +} + +// NewInt32Data creates a new Int32. +func NewInt32Data(data *Data) *Int32 { + a := &Int32{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Int32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Int32) Value(i int) int32 { return a.values[i] } + +// Values returns the values. +func (a *Int32) Int32Values() []int32 { return a.values } + +// String returns a string representation of the array. 
+func (a *Int32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Int32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Int32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualInt32(left, right *Int32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of uint32 values. +type Uint32 struct { + array + values []uint32 +} + +// NewUint32Data creates a new Uint32. +func NewUint32Data(data *Data) *Uint32 { + a := &Uint32{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Uint32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Uint32) Value(i int) uint32 { return a.values[i] } + +// Values returns the values. +func (a *Uint32) Uint32Values() []uint32 { return a.values } + +// String returns a string representation of the array. +func (a *Uint32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Uint32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Uint32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualUint32(left, right *Uint32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of float32 values. +type Float32 struct { + array + values []float32 +} + +// NewFloat32Data creates a new Float32. +func NewFloat32Data(data *Data) *Float32 { + a := &Float32{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Float32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Float32) Value(i int) float32 { return a.values[i] } + +// Values returns the values. +func (a *Float32) Float32Values() []float32 { return a.values } + +// String returns a string representation of the array. 
+func (a *Float32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Float32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Float32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualFloat32(left, right *Float32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of int16 values. +type Int16 struct { + array + values []int16 +} + +// NewInt16Data creates a new Int16. +func NewInt16Data(data *Data) *Int16 { + a := &Int16{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Int16) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Int16) Value(i int) int16 { return a.values[i] } + +// Values returns the values. +func (a *Int16) Int16Values() []int16 { return a.values } + +// String returns a string representation of the array. +func (a *Int16) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Int16) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Int16Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualInt16(left, right *Int16) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of uint16 values. +type Uint16 struct { + array + values []uint16 +} + +// NewUint16Data creates a new Uint16. +func NewUint16Data(data *Data) *Uint16 { + a := &Uint16{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Uint16) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Uint16) Value(i int) uint16 { return a.values[i] } + +// Values returns the values. +func (a *Uint16) Uint16Values() []uint16 { return a.values } + +// String returns a string representation of the array. 
+func (a *Uint16) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Uint16) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Uint16Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualUint16(left, right *Uint16) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of int8 values. +type Int8 struct { + array + values []int8 +} + +// NewInt8Data creates a new Int8. +func NewInt8Data(data *Data) *Int8 { + a := &Int8{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Int8) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Int8) Value(i int) int8 { return a.values[i] } + +// Values returns the values. +func (a *Int8) Int8Values() []int8 { return a.values } + +// String returns a string representation of the array. +func (a *Int8) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Int8) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Int8Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualInt8(left, right *Int8) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of uint8 values. +type Uint8 struct { + array + values []uint8 +} + +// NewUint8Data creates a new Uint8. +func NewUint8Data(data *Data) *Uint8 { + a := &Uint8{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Uint8) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Uint8) Value(i int) uint8 { return a.values[i] } + +// Values returns the values. +func (a *Uint8) Uint8Values() []uint8 { return a.values } + +// String returns a string representation of the array. 
+func (a *Uint8) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Uint8) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Uint8Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualUint8(left, right *Uint8) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Timestamp values. +type Timestamp struct { + array + values []arrow.Timestamp +} + +// NewTimestampData creates a new Timestamp. +func NewTimestampData(data *Data) *Timestamp { + a := &Timestamp{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Timestamp) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Timestamp) Value(i int) arrow.Timestamp { return a.values[i] } + +// Values returns the values. +func (a *Timestamp) TimestampValues() []arrow.Timestamp { return a.values } + +// String returns a string representation of the array. +func (a *Timestamp) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Timestamp) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.TimestampTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualTimestamp(left, right *Timestamp) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Time32 values. +type Time32 struct { + array + values []arrow.Time32 +} + +// NewTime32Data creates a new Time32. +func NewTime32Data(data *Data) *Time32 { + a := &Time32{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Time32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Time32) Value(i int) arrow.Time32 { return a.values[i] } + +// Values returns the values. +func (a *Time32) Time32Values() []arrow.Time32 { return a.values } + +// String returns a string representation of the array. 
+func (a *Time32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Time32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Time32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualTime32(left, right *Time32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Time64 values. +type Time64 struct { + array + values []arrow.Time64 +} + +// NewTime64Data creates a new Time64. +func NewTime64Data(data *Data) *Time64 { + a := &Time64{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Time64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Time64) Value(i int) arrow.Time64 { return a.values[i] } + +// Values returns the values. +func (a *Time64) Time64Values() []arrow.Time64 { return a.values } + +// String returns a string representation of the array. +func (a *Time64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Time64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Time64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualTime64(left, right *Time64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Date32 values. +type Date32 struct { + array + values []arrow.Date32 +} + +// NewDate32Data creates a new Date32. +func NewDate32Data(data *Data) *Date32 { + a := &Date32{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Date32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Date32) Value(i int) arrow.Date32 { return a.values[i] } + +// Values returns the values. +func (a *Date32) Date32Values() []arrow.Date32 { return a.values } + +// String returns a string representation of the array. 
+func (a *Date32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Date32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Date32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualDate32(left, right *Date32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Date64 values. +type Date64 struct { + array + values []arrow.Date64 +} + +// NewDate64Data creates a new Date64. +func NewDate64Data(data *Data) *Date64 { + a := &Date64{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Date64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Date64) Value(i int) arrow.Date64 { return a.values[i] } + +// Values returns the values. +func (a *Date64) Date64Values() []arrow.Date64 { return a.values } + +// String returns a string representation of the array. +func (a *Date64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Date64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Date64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualDate64(left, right *Date64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Duration values. +type Duration struct { + array + values []arrow.Duration +} + +// NewDurationData creates a new Duration. +func NewDurationData(data *Data) *Duration { + a := &Duration{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *Duration) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Duration) Value(i int) arrow.Duration { return a.values[i] } + +// Values returns the values. +func (a *Duration) DurationValues() []arrow.Duration { return a.values } + +// String returns a string representation of the array. 
+func (a *Duration) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Duration) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.DurationTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqualDuration(left, right *Duration) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/arrow/array/numeric.gen.go.tmpl new file mode 100644 index 000000000000..b742823a18d8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/numeric.gen.go.tmpl @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + + "github.com/apache/arrow/go/arrow" +) + +{{range .In}} + +// A type which represents an immutable sequence of {{or .QualifiedType .Type}} values. +type {{.Name}} struct { + array + values []{{or .QualifiedType .Type}} +} + +// New{{.Name}}Data creates a new {{.Name}}. +func New{{.Name}}Data(data *Data) *{{.Name}} { + a := &{{.Name}}{} + a.refCount = 1 + a.setData(data) + return a +} + +// Reset resets the array for re-use. +func (a *{{.Name}}) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *{{.Name}}) Value(i int) {{or .QualifiedType .Type}} { return a.values[i] } + +// Values returns the values. +func (a *{{.Name}}) {{.Name}}Values() []{{or .QualifiedType .Type}} { return a.values } + +// String returns a string representation of the array. 
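
The template file here is what generated numeric.gen.go above, so every concrete type shares the same read-side surface: Len, IsNull, Value, and a typed Values slice. Reading an array back (sketch, shown with the Int64 variant):

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	b := array.NewInt64Builder(memory.NewGoAllocator())
	defer b.Release()
	b.AppendValues([]int64{1, 2, 3, 4}, []bool{true, true, false, true})

	arr := b.NewInt64Array()
	defer arr.Release()

	for i := 0; i < arr.Len(); i++ {
		if arr.IsNull(i) {
			fmt.Print("(null) ")
			continue
		}
		fmt.Print(arr.Value(i), " ") // Value reads the backing buffer without copying
	}
	fmt.Println() // prints: 1 2 (null) 4
}
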
+func (a *{{.Name}}) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *{{.Name}}) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.{{.Name}}Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func arrayEqual{{.Name}}(left, right *{{.Name}}) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +{{end}} diff --git a/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen.go b/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen.go new file mode 100644 index 000000000000..5423d8ce515c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen.go @@ -0,0 +1,2227 @@ +// Code generated by array/numericbuilder.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +type Int64Builder struct { + builder + + data *memory.Buffer + rawData []int64 +} + +func NewInt64Builder(mem memory.Allocator) *Int64Builder { + return &Int64Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Int64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Int64Builder) Append(v int64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Int64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Int64Builder) UnsafeAppend(v int64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Int64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. 
If empty, +// all values in v are appended and considered valid. +func (b *Int64Builder) AppendValues(v []int64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Int64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Int64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Int64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Int64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Int64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Int64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Int64Traits.BytesRequired(n)) + b.rawData = arrow.Int64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Int64 array from the memory buffers used by the builder and resets the Int64Builder +// so it can be used to build a new array. +func (b *Int64Builder) NewArray() Interface { + return b.NewInt64Array() +} + +// NewInt64Array creates a Int64 array from the memory buffers used by the builder and resets the Int64Builder +// so it can be used to build a new array. +func (b *Int64Builder) NewInt64Array() (a *Int64) { + data := b.newData() + a = NewInt64Data(data) + data.Release() + return +} + +func (b *Int64Builder) newData() (data *Data) { + bytesRequired := arrow.Int64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Int64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Uint64Builder struct { + builder + + data *memory.Buffer + rawData []uint64 +} + +func NewUint64Builder(mem memory.Allocator) *Uint64Builder { + return &Uint64Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Uint64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Uint64Builder) Append(v uint64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Uint64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Uint64Builder) UnsafeAppend(v uint64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Uint64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Uint64Builder) AppendValues(v []uint64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Uint64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Uint64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Uint64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Uint64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Uint64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Uint64Traits.BytesRequired(n)) + b.rawData = arrow.Uint64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Uint64 array from the memory buffers used by the builder and resets the Uint64Builder +// so it can be used to build a new array. +func (b *Uint64Builder) NewArray() Interface { + return b.NewUint64Array() +} + +// NewUint64Array creates a Uint64 array from the memory buffers used by the builder and resets the Uint64Builder +// so it can be used to build a new array. +func (b *Uint64Builder) NewUint64Array() (a *Uint64) { + data := b.newData() + a = NewUint64Data(data) + data.Release() + return +} + +func (b *Uint64Builder) newData() (data *Data) { + bytesRequired := arrow.Uint64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Uint64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Float64Builder struct { + builder + + data *memory.Buffer + rawData []float64 +} + +func NewFloat64Builder(mem memory.Allocator) *Float64Builder { + return &Float64Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
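
The AppendValues contract repeated above is identical across the generated builders: valid must be either empty, in which case every value is treated as valid, or exactly len(v). For instance (sketch):

package main

import (
	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	b := array.NewUint64Builder(memory.NewGoAllocator())
	defer b.Release()

	b.AppendValues([]uint64{7, 8, 9}, nil)                // empty valid slice: every value is valid
	b.AppendValues([]uint64{10, 11}, []bool{true, false}) // explicit validity, len(valid) == len(v)
	// b.AppendValues([]uint64{1, 2}, []bool{true})       // would panic: length mismatch

	arr := b.NewUint64Array()
	defer arr.Release()
	_ = arr.NullN() // 1: only the slot holding 11 was marked null
}
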
+func (b *Float64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Float64Builder) Append(v float64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Float64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Float64Builder) UnsafeAppend(v float64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Float64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Float64Builder) AppendValues(v []float64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Float64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Float64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Float64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Float64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Float64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Float64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Float64Traits.BytesRequired(n)) + b.rawData = arrow.Float64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Float64 array from the memory buffers used by the builder and resets the Float64Builder +// so it can be used to build a new array. +func (b *Float64Builder) NewArray() Interface { + return b.NewFloat64Array() +} + +// NewFloat64Array creates a Float64 array from the memory buffers used by the builder and resets the Float64Builder +// so it can be used to build a new array. 
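The AppendValues contract repeated in each of these hunks is strict: valid must be empty or exactly len(v), otherwise the builder panics. A sketch of the two legal call shapes with the Float64Builder vendored above (Float64Values comes from the generated array side, per the test template):

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	b := array.NewFloat64Builder(memory.NewGoAllocator())
	defer b.Release()

	// Empty valid slice: every value is considered valid.
	b.AppendValues([]float64{1.5, 2.5}, nil)

	// Per-element mask: false marks the corresponding slot null.
	b.AppendValues([]float64{3.5, 4.5}, []bool{true, false})

	// b.AppendValues([]float64{1}, []bool{true, false}) would panic:
	// "len(v) != len(valid) && len(valid) != 0"

	arr := b.NewFloat64Array()
	defer arr.Release()

	// Note Copy writes all of v, so the raw buffer still holds 4.5
	// even though slot 3 is flagged null in the bitmap.
	fmt.Println(arr.Float64Values(), arr.NullN()) // [1.5 2.5 3.5 4.5] 1
}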
+func (b *Float64Builder) NewFloat64Array() (a *Float64) { + data := b.newData() + a = NewFloat64Data(data) + data.Release() + return +} + +func (b *Float64Builder) newData() (data *Data) { + bytesRequired := arrow.Float64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Float64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Int32Builder struct { + builder + + data *memory.Buffer + rawData []int32 +} + +func NewInt32Builder(mem memory.Allocator) *Int32Builder { + return &Int32Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Int32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Int32Builder) Append(v int32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Int32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Int32Builder) UnsafeAppend(v int32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Int32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Int32Builder) AppendValues(v []int32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Int32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Int32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Int32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Int32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Int32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Int32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Int32Traits.BytesRequired(n)) + b.rawData = arrow.Int32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Int32 array from the memory buffers used by the builder and resets the Int32Builder +// so it can be used to build a new array. 
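UnsafeAppend writes straight into rawData with no capacity check, which is why the safe Append above pairs it with Reserve(1). Callers that know the element count up front can amortize that check themselves; a hedged sketch of the fast path with the Int32Builder:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	b := array.NewInt32Builder(memory.NewGoAllocator())
	defer b.Release()

	const n = 1000
	b.Reserve(n) // guarantees room for n more elements
	for i := 0; i < n; i++ {
		b.UnsafeAppend(int32(i)) // no per-element capacity check
	}

	arr := b.NewInt32Array()
	defer arr.Release()
	fmt.Println(arr.Len()) // 1000
}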
+func (b *Int32Builder) NewArray() Interface { + return b.NewInt32Array() +} + +// NewInt32Array creates a Int32 array from the memory buffers used by the builder and resets the Int32Builder +// so it can be used to build a new array. +func (b *Int32Builder) NewInt32Array() (a *Int32) { + data := b.newData() + a = NewInt32Data(data) + data.Release() + return +} + +func (b *Int32Builder) newData() (data *Data) { + bytesRequired := arrow.Int32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Int32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Uint32Builder struct { + builder + + data *memory.Buffer + rawData []uint32 +} + +func NewUint32Builder(mem memory.Allocator) *Uint32Builder { + return &Uint32Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Uint32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Uint32Builder) Append(v uint32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Uint32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Uint32Builder) UnsafeAppend(v uint32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Uint32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Uint32Builder) AppendValues(v []uint32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Uint32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Uint32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Uint32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Uint32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. 
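Every builder in this file embeds the same refCount bookkeeping: Release asserts the count is positive and frees the null bitmap and data buffer only when it reaches zero. The vendored memory.CheckedAllocator (used by the test template later in this diff) can verify nothing leaks; a sketch with a hypothetical test name:

package array_test

import (
	"testing"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func TestBuilderRefCounting(t *testing.T) {
	mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
	defer mem.AssertSize(t, 0) // fails the test if any allocation is still live

	b := array.NewUint32Builder(mem)
	b.Retain()  // refCount: 2
	b.Release() // refCount: 1; buffers stay alive
	b.Append(42)
	b.Release() // refCount: 0; nullBitmap and data are freed here
}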
+func (b *Uint32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Uint32Traits.BytesRequired(n)) + b.rawData = arrow.Uint32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Uint32 array from the memory buffers used by the builder and resets the Uint32Builder +// so it can be used to build a new array. +func (b *Uint32Builder) NewArray() Interface { + return b.NewUint32Array() +} + +// NewUint32Array creates a Uint32 array from the memory buffers used by the builder and resets the Uint32Builder +// so it can be used to build a new array. +func (b *Uint32Builder) NewUint32Array() (a *Uint32) { + data := b.newData() + a = NewUint32Data(data) + data.Release() + return +} + +func (b *Uint32Builder) newData() (data *Data) { + bytesRequired := arrow.Uint32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Uint32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Float32Builder struct { + builder + + data *memory.Buffer + rawData []float32 +} + +func NewFloat32Builder(mem memory.Allocator) *Float32Builder { + return &Float32Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Float32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Float32Builder) Append(v float32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Float32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Float32Builder) UnsafeAppend(v float32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Float32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Float32Builder) AppendValues(v []float32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Float32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Float32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Float32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Float32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. 
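As each New*Array comment notes, handing the buffers off resets the builder, so one builder can produce a sequence of independent arrays; the test template below relies on exactly this by appending again after the first NewArray call. Sketch:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	b := array.NewUint32Builder(memory.NewGoAllocator())
	defer b.Release()

	b.AppendValues([]uint32{1, 2, 3}, nil)
	first := b.NewUint32Array() // builder is empty again after this
	defer first.Release()

	b.AppendValues([]uint32{4, 5}, nil)
	second := b.NewUint32Array() // does not share buffers with first
	defer second.Release()

	fmt.Println(first.Uint32Values(), second.Uint32Values()) // [1 2 3] [4 5]
}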
+func (b *Float32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Float32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Float32Traits.BytesRequired(n)) + b.rawData = arrow.Float32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Float32 array from the memory buffers used by the builder and resets the Float32Builder +// so it can be used to build a new array. +func (b *Float32Builder) NewArray() Interface { + return b.NewFloat32Array() +} + +// NewFloat32Array creates a Float32 array from the memory buffers used by the builder and resets the Float32Builder +// so it can be used to build a new array. +func (b *Float32Builder) NewFloat32Array() (a *Float32) { + data := b.newData() + a = NewFloat32Data(data) + data.Release() + return +} + +func (b *Float32Builder) newData() (data *Data) { + bytesRequired := arrow.Float32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Float32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Int16Builder struct { + builder + + data *memory.Buffer + rawData []int16 +} + +func NewInt16Builder(mem memory.Allocator) *Int16Builder { + return &Int16Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Int16Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Int16Builder) Append(v int16) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Int16Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Int16Builder) UnsafeAppend(v int16) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Int16Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. 
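Note the clamp at the top of Resize: a fresh builder never allocates for fewer than minBuilderCapacity elements (32, judging by the "4 bytes due to minBuilderCapacity" bitmap comment in the test template later in this diff). A sketch making that floor observable through the embedded builder's Cap():

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	b := array.NewFloat32Builder(memory.NewGoAllocator())
	defer b.Release()

	b.Resize(4)          // requested capacity is below the floor
	fmt.Println(b.Cap()) // 32: the fresh-builder path inits with the clamped n

	b.Resize(100)
	fmt.Println(b.Cap() >= 100) // true: grows past the floor on demand
}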
+func (b *Int16Builder) AppendValues(v []int16, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Int16Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Int16Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Int16Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Int16Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Int16Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Int16Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Int16Traits.BytesRequired(n)) + b.rawData = arrow.Int16Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Int16 array from the memory buffers used by the builder and resets the Int16Builder +// so it can be used to build a new array. +func (b *Int16Builder) NewArray() Interface { + return b.NewInt16Array() +} + +// NewInt16Array creates a Int16 array from the memory buffers used by the builder and resets the Int16Builder +// so it can be used to build a new array. +func (b *Int16Builder) NewInt16Array() (a *Int16) { + data := b.newData() + a = NewInt16Data(data) + data.Release() + return +} + +func (b *Int16Builder) newData() (data *Data) { + bytesRequired := arrow.Int16Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Int16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Uint16Builder struct { + builder + + data *memory.Buffer + rawData []uint16 +} + +func NewUint16Builder(mem memory.Allocator) *Uint16Builder { + return &Uint16Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Uint16Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Uint16Builder) Append(v uint16) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Uint16Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Uint16Builder) UnsafeAppend(v uint16) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Uint16Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). 
The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Uint16Builder) AppendValues(v []uint16, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Uint16Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Uint16Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint16Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Uint16Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Uint16Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Uint16Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Uint16Traits.BytesRequired(n)) + b.rawData = arrow.Uint16Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Uint16 array from the memory buffers used by the builder and resets the Uint16Builder +// so it can be used to build a new array. +func (b *Uint16Builder) NewArray() Interface { + return b.NewUint16Array() +} + +// NewUint16Array creates a Uint16 array from the memory buffers used by the builder and resets the Uint16Builder +// so it can be used to build a new array. +func (b *Uint16Builder) NewUint16Array() (a *Uint16) { + data := b.newData() + a = NewUint16Data(data) + data.Release() + return +} + +func (b *Uint16Builder) newData() (data *Data) { + bytesRequired := arrow.Uint16Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Uint16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Int8Builder struct { + builder + + data *memory.Buffer + rawData []int8 +} + +func NewInt8Builder(mem memory.Allocator) *Int8Builder { + return &Int8Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
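The validity bitmap these builders maintain is bit-packed LSB-first: the test template below expects the byte 0xb7 for the pattern valid,valid,valid,null,valid,valid,null,valid (bits 0..7 = 1,1,1,0,1,1,0,1). A sketch decoding it by hand with the Uint16Builder, using the NullBitmapBytes accessor from the test template and an assumed IsNull on the array side:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	b := array.NewUint16Builder(memory.NewGoAllocator())
	defer b.Release()

	b.AppendValues([]uint16{1, 2, 3, 0, 5, 6, 0, 8},
		[]bool{true, true, true, false, true, true, false, true})

	arr := b.NewUint16Array()
	defer arr.Release()

	first := arr.NullBitmapBytes()[0]
	fmt.Printf("%#x\n", first) // 0xb7: bit i set means slot i is valid
	for i := 0; i < arr.Len(); i++ {
		fmt.Println(i, first&(1<<uint(i)) != 0, !arr.IsNull(i)) // the two agree
	}
}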
+func (b *Int8Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Int8Builder) Append(v int8) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Int8Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Int8Builder) UnsafeAppend(v int8) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Int8Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Int8Builder) AppendValues(v []int8, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Int8Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Int8Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Int8Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Int8Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Int8Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Int8Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Int8Traits.BytesRequired(n)) + b.rawData = arrow.Int8Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Int8 array from the memory buffers used by the builder and resets the Int8Builder +// so it can be used to build a new array. +func (b *Int8Builder) NewArray() Interface { + return b.NewInt8Array() +} + +// NewInt8Array creates a Int8 array from the memory buffers used by the builder and resets the Int8Builder +// so it can be used to build a new array. 
+func (b *Int8Builder) NewInt8Array() (a *Int8) { + data := b.newData() + a = NewInt8Data(data) + data.Release() + return +} + +func (b *Int8Builder) newData() (data *Data) { + bytesRequired := arrow.Int8Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Int8, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Uint8Builder struct { + builder + + data *memory.Buffer + rawData []uint8 +} + +func NewUint8Builder(mem memory.Allocator) *Uint8Builder { + return &Uint8Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Uint8Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Uint8Builder) Append(v uint8) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Uint8Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Uint8Builder) UnsafeAppend(v uint8) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Uint8Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Uint8Builder) AppendValues(v []uint8, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Uint8Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Uint8Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint8Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Uint8Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Uint8Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Uint8Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Uint8Traits.BytesRequired(n)) + b.rawData = arrow.Uint8Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Uint8 array from the memory buffers used by the builder and resets the Uint8Builder +// so it can be used to build a new array. 
+func (b *Uint8Builder) NewArray() Interface { + return b.NewUint8Array() +} + +// NewUint8Array creates a Uint8 array from the memory buffers used by the builder and resets the Uint8Builder +// so it can be used to build a new array. +func (b *Uint8Builder) NewUint8Array() (a *Uint8) { + data := b.newData() + a = NewUint8Data(data) + data.Release() + return +} + +func (b *Uint8Builder) newData() (data *Data) { + bytesRequired := arrow.Uint8Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Uint8, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type TimestampBuilder struct { + builder + + dtype *arrow.TimestampType + data *memory.Buffer + rawData []arrow.Timestamp +} + +func NewTimestampBuilder(mem memory.Allocator, dtype *arrow.TimestampType) *TimestampBuilder { + return &TimestampBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *TimestampBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *TimestampBuilder) Append(v arrow.Timestamp) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *TimestampBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *TimestampBuilder) UnsafeAppend(v arrow.Timestamp) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *TimestampBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *TimestampBuilder) AppendValues(v []arrow.Timestamp, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.TimestampTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *TimestampBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.TimestampTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *TimestampBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. 
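TimestampBuilder is the first parametric builder in this file: unlike the fixed-width numeric builders above it, its constructor takes the concrete arrow.TimestampType, which newData later embeds in the resulting array. A sketch assuming second resolution, mirroring the dtype construction in the test template:

package main

import (
	"fmt"
	"time"

	"github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	dtype := &arrow.TimestampType{Unit: arrow.Second}
	b := array.NewTimestampBuilder(memory.NewGoAllocator(), dtype)
	defer b.Release()

	t := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
	b.Append(arrow.Timestamp(t.Unix())) // raw int64, interpreted via the dtype's unit
	b.AppendNull()

	arr := b.NewTimestampArray()
	defer arr.Release()
	fmt.Println(arr.DataType(), arr.Len(), arr.NullN()) // e.g. timestamp[s] 2 1
}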
+func (b *TimestampBuilder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.TimestampTraits.BytesRequired(n)) + b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder +// so it can be used to build a new array. +func (b *TimestampBuilder) NewArray() Interface { + return b.NewTimestampArray() +} + +// NewTimestampArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder +// so it can be used to build a new array. +func (b *TimestampBuilder) NewTimestampArray() (a *Timestamp) { + data := b.newData() + a = NewTimestampData(data) + data.Release() + return +} + +func (b *TimestampBuilder) newData() (data *Data) { + bytesRequired := arrow.TimestampTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Time32Builder struct { + builder + + dtype *arrow.Time32Type + data *memory.Buffer + rawData []arrow.Time32 +} + +func NewTime32Builder(mem memory.Allocator, dtype *arrow.Time32Type) *Time32Builder { + return &Time32Builder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Time32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Time32Builder) Append(v arrow.Time32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Time32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Time32Builder) UnsafeAppend(v arrow.Time32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Time32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Time32Builder) AppendValues(v []arrow.Time32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Time32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Time32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Time32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Time32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. 
+func (b *Time32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Time32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Time32Traits.BytesRequired(n)) + b.rawData = arrow.Time32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Time32 array from the memory buffers used by the builder and resets the Time32Builder +// so it can be used to build a new array. +func (b *Time32Builder) NewArray() Interface { + return b.NewTime32Array() +} + +// NewTime32Array creates a Time32 array from the memory buffers used by the builder and resets the Time32Builder +// so it can be used to build a new array. +func (b *Time32Builder) NewTime32Array() (a *Time32) { + data := b.newData() + a = NewTime32Data(data) + data.Release() + return +} + +func (b *Time32Builder) newData() (data *Data) { + bytesRequired := arrow.Time32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Time64Builder struct { + builder + + dtype *arrow.Time64Type + data *memory.Buffer + rawData []arrow.Time64 +} + +func NewTime64Builder(mem memory.Allocator, dtype *arrow.Time64Type) *Time64Builder { + return &Time64Builder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Time64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Time64Builder) Append(v arrow.Time64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Time64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Time64Builder) UnsafeAppend(v arrow.Time64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Time64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. 
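Time32 and Time64 follow the same parametric pattern; per the Arrow format spec, 32-bit times carry second or millisecond units and 64-bit times carry microsecond or nanosecond units (a spec-level constraint, not enforced in the hunks shown here). A sketch, assuming the generated arrays expose the usual Value accessor:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()

	t32 := array.NewTime32Builder(mem, &arrow.Time32Type{Unit: arrow.Second})
	defer t32.Release()
	t32.Append(arrow.Time32(9*3600 + 30*60)) // 09:30:00 as seconds since midnight

	t64 := array.NewTime64Builder(mem, &arrow.Time64Type{Unit: arrow.Nanosecond})
	defer t64.Release()
	t64.Append(arrow.Time64(34200000000000)) // the same instant in nanoseconds

	a32, a64 := t32.NewTime32Array(), t64.NewTime64Array()
	defer a32.Release()
	defer a64.Release()
	fmt.Println(a32.Value(0), a64.Value(0))
}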
+func (b *Time64Builder) AppendValues(v []arrow.Time64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Time64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Time64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Time64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Time64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Time64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Time64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Time64Traits.BytesRequired(n)) + b.rawData = arrow.Time64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Time64 array from the memory buffers used by the builder and resets the Time64Builder +// so it can be used to build a new array. +func (b *Time64Builder) NewArray() Interface { + return b.NewTime64Array() +} + +// NewTime64Array creates a Time64 array from the memory buffers used by the builder and resets the Time64Builder +// so it can be used to build a new array. +func (b *Time64Builder) NewTime64Array() (a *Time64) { + data := b.newData() + a = NewTime64Data(data) + data.Release() + return +} + +func (b *Time64Builder) newData() (data *Data) { + bytesRequired := arrow.Time64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Date32Builder struct { + builder + + data *memory.Buffer + rawData []arrow.Date32 +} + +func NewDate32Builder(mem memory.Allocator) *Date32Builder { + return &Date32Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Date32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Date32Builder) Append(v arrow.Date32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Date32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Date32Builder) UnsafeAppend(v arrow.Date32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Date32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Date32Builder) AppendValues(v []arrow.Date32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Date32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Date32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Date32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Date32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Date32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Date32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Date32Traits.BytesRequired(n)) + b.rawData = arrow.Date32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Date32 array from the memory buffers used by the builder and resets the Date32Builder +// so it can be used to build a new array. +func (b *Date32Builder) NewArray() Interface { + return b.NewDate32Array() +} + +// NewDate32Array creates a Date32 array from the memory buffers used by the builder and resets the Date32Builder +// so it can be used to build a new array. +func (b *Date32Builder) NewDate32Array() (a *Date32) { + data := b.newData() + a = NewDate32Data(data) + data.Release() + return +} + +func (b *Date32Builder) newData() (data *Data) { + bytesRequired := arrow.Date32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Date32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type Date64Builder struct { + builder + + data *memory.Buffer + rawData []arrow.Date64 +} + +func NewDate64Builder(mem memory.Allocator) *Date64Builder { + return &Date64Builder{builder: builder{refCount: 1, mem: mem}} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
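Date32 above and Date64 below revert to the non-parametric shape, since their units are fixed by the Arrow spec: days and milliseconds since the Unix epoch, respectively. A sketch with example epoch values (both encode 2021-01-01):

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()

	d32 := array.NewDate32Builder(mem)
	defer d32.Release()
	d32.Append(arrow.Date32(18628)) // days since 1970-01-01

	d64 := array.NewDate64Builder(mem)
	defer d64.Release()
	d64.Append(arrow.Date64(1609459200000)) // milliseconds since the epoch

	a32, a64 := d32.NewDate32Array(), d64.NewDate64Array()
	defer a32.Release()
	defer a64.Release()
	fmt.Println(a32.Value(0), a64.Value(0)) // 18628 1609459200000
}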
+func (b *Date64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Date64Builder) Append(v arrow.Date64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Date64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Date64Builder) UnsafeAppend(v arrow.Date64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Date64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Date64Builder) AppendValues(v []arrow.Date64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Date64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Date64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Date64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Date64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Date64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Date64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Date64Traits.BytesRequired(n)) + b.rawData = arrow.Date64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Date64 array from the memory buffers used by the builder and resets the Date64Builder +// so it can be used to build a new array. +func (b *Date64Builder) NewArray() Interface { + return b.NewDate64Array() +} + +// NewDate64Array creates a Date64 array from the memory buffers used by the builder and resets the Date64Builder +// so it can be used to build a new array. 
+func (b *Date64Builder) NewDate64Array() (a *Date64) { + data := b.newData() + a = NewDate64Data(data) + data.Release() + return +} + +func (b *Date64Builder) newData() (data *Data) { + bytesRequired := arrow.Date64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Date64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +type DurationBuilder struct { + builder + + dtype *arrow.DurationType + data *memory.Buffer + rawData []arrow.Duration +} + +func NewDurationBuilder(mem memory.Allocator, dtype *arrow.DurationType) *DurationBuilder { + return &DurationBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *DurationBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *DurationBuilder) Append(v arrow.Duration) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *DurationBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *DurationBuilder) UnsafeAppend(v arrow.Duration) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *DurationBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *DurationBuilder) AppendValues(v []arrow.Duration, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.DurationTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *DurationBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.DurationTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.DurationTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *DurationBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. 
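DurationBuilder rounds out the parametric group: the unit again travels with the type rather than with the int64 values. A sketch converting from time.Duration, assuming nanosecond resolution and that arrow.DurationType carries the same Unit field the test template uses for the other parametric types:

package main

import (
	"fmt"
	"time"

	"github.com/apache/arrow/go/arrow"
	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	dtype := &arrow.DurationType{Unit: arrow.Nanosecond}
	b := array.NewDurationBuilder(memory.NewGoAllocator(), dtype)
	defer b.Release()

	b.Append(arrow.Duration((90 * time.Minute).Nanoseconds()))
	b.AppendNull()

	arr := b.NewDurationArray()
	defer arr.Release()
	fmt.Println(arr.Len(), arr.NullN()) // 2 1
}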
+func (b *DurationBuilder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.DurationTraits.BytesRequired(n)) + b.rawData = arrow.DurationTraits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Duration array from the memory buffers used by the builder and resets the DurationBuilder +// so it can be used to build a new array. +func (b *DurationBuilder) NewArray() Interface { + return b.NewDurationArray() +} + +// NewDurationArray creates a Duration array from the memory buffers used by the builder and resets the DurationBuilder +// so it can be used to build a new array. +func (b *DurationBuilder) NewDurationArray() (a *Duration) { + data := b.newData() + a = NewDurationData(data) + data.Release() + return +} + +func (b *DurationBuilder) newData() (data *Data) { + bytesRequired := arrow.DurationTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +var ( + _ Builder = (*Int64Builder)(nil) + _ Builder = (*Uint64Builder)(nil) + _ Builder = (*Float64Builder)(nil) + _ Builder = (*Int32Builder)(nil) + _ Builder = (*Uint32Builder)(nil) + _ Builder = (*Float32Builder)(nil) + _ Builder = (*Int16Builder)(nil) + _ Builder = (*Uint16Builder)(nil) + _ Builder = (*Int8Builder)(nil) + _ Builder = (*Uint8Builder)(nil) + _ Builder = (*TimestampBuilder)(nil) + _ Builder = (*Time32Builder)(nil) + _ Builder = (*Time64Builder)(nil) + _ Builder = (*Date32Builder)(nil) + _ Builder = (*Date64Builder)(nil) + _ Builder = (*DurationBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen.go.tmpl b/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen.go.tmpl new file mode 100644 index 000000000000..149a88aed915 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen.go.tmpl @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
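The var block above pins every generated builder to the Builder interface at compile time, and the numericbuilder.gen.go.tmpl file that follows is the template those builders are generated from: one {{range .In}} iteration per element type, with {{.Opt.Parametric}} toggling the dtype-carrying constructor and NewData call. The shared interface is what allows type-generic code over any of them; a sketch, assuming Builder exposes the Reserve/AppendNull/NewArray/Release methods the assertions imply:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/array"
	"github.com/apache/arrow/go/arrow/memory"
)

// padNulls appends n nulls through the shared Builder interface,
// without knowing which concrete generated builder it was handed.
func padNulls(b array.Builder, n int) {
	b.Reserve(n)
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

func main() {
	mem := memory.NewGoAllocator()
	builders := []array.Builder{
		array.NewInt64Builder(mem),
		array.NewFloat32Builder(mem),
	}
	for _, b := range builders {
		padNulls(b, 3)
		arr := b.NewArray()
		fmt.Println(arr.DataType(), arr.NullN()) // 3 nulls each
		arr.Release()
		b.Release()
	}
}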
+ +package array + +import ( + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +{{range .In}} + +type {{.Name}}Builder struct { + builder + +{{if .Opt.Parametric -}} + dtype *arrow.{{.Name}}Type +{{end -}} + data *memory.Buffer + rawData []{{or .QualifiedType .Type}} +} + +{{if .Opt.Parametric}} +func New{{.Name}}Builder(mem memory.Allocator, dtype *arrow.{{.Name}}Type) *{{.Name}}Builder { + return &{{.Name}}Builder{builder: builder{refCount:1, mem: mem}, dtype: dtype} +} +{{else}} +func New{{.Name}}Builder(mem memory.Allocator) *{{.Name}}Builder { + return &{{.Name}}Builder{builder: builder{refCount:1, mem: mem}} +} +{{end}} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *{{.Name}}Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *{{.Name}}Builder) Append(v {{or .QualifiedType .Type}}) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *{{.Name}}Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *{{.Name}}Builder) UnsafeAppend(v {{or .QualifiedType .Type}}) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *{{.Name}}Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *{{.Name}}Builder) AppendValues(v []{{or .QualifiedType .Type}}, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.{{.Name}}Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *{{.Name}}Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.{{.Name}}Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.{{.Name}}Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *{{.Name}}Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *{{.Name}}Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.{{.Name}}Traits.BytesRequired(n)) + b.rawData = arrow.{{.Name}}Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a {{.Name}} array from the memory buffers used by the builder and resets the {{.Name}}Builder +// so it can be used to build a new array. 
+func (b *{{.Name}}Builder) NewArray() Interface { + return b.New{{.Name}}Array() +} + +// New{{.Name}}Array creates a {{.Name}} array from the memory buffers used by the builder and resets the {{.Name}}Builder +// so it can be used to build a new array. +func (b *{{.Name}}Builder) New{{.Name}}Array() (a *{{.Name}}) { + data := b.newData() + a = New{{.Name}}Data(data) + data.Release() + return +} + +func (b *{{.Name}}Builder) newData() (data *Data) { + bytesRequired := arrow.{{.Name}}Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } +{{if .Opt.Parametric -}} + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) +{{else -}} + data = NewData(arrow.PrimitiveTypes.{{.Name}}, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) +{{end -}} + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} +{{end}} + +var ( +{{- range .In}} + _ Builder = (*{{.Name}}Builder)(nil) +{{- end}} +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen_test.go.tmpl new file mode 100644 index 000000000000..e4f78108830c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/numericbuilder.gen_test.go.tmpl @@ -0,0 +1,216 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
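+
+// The tests below rely on memory.CheckedAllocator to verify that every
+// allocation made through a builder or array is eventually released. The
+// pattern, sketched here for reference (illustrative only):
+//
+//	mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
+//	defer mem.AssertSize(t, 0) // fails the test if any bytes are still allocated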
+ +package array_test + +import ( + "testing" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/array" + "github.com/apache/arrow/go/arrow/memory" + "github.com/stretchr/testify/assert" +) + +{{range .In}} +func TestNew{{.Name}}Builder(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} + dtype := &arrow.{{.Name}}Type{Unit: arrow.Second} + ab := array.New{{.Name}}Builder(mem, dtype) +{{else}} + ab := array.New{{.Name}}Builder(mem) +{{end -}} + defer ab.Release() + + ab.Retain() + ab.Release() + + ab.Append(1) + ab.Append(2) + ab.Append(3) + ab.AppendNull() + ab.Append(5) + ab.Append(6) + ab.AppendNull() + ab.Append(8) + ab.Append(9) + ab.Append(10) + + // check state of builder before New{{.Name}}Array + assert.Equal(t, 10, ab.Len(), "unexpected Len()") + assert.Equal(t, 2, ab.NullN(), "unexpected NullN()") + + a := ab.New{{.Name}}Array() + + // check state of builder after New{{.Name}}Array + assert.Zero(t, ab.Len(), "unexpected ArrayBuilder.Len(), New{{.Name}}Array did not reset state") + assert.Zero(t, ab.Cap(), "unexpected ArrayBuilder.Cap(), New{{.Name}}Array did not reset state") + assert.Zero(t, ab.NullN(), "unexpected ArrayBuilder.NullN(), New{{.Name}}Array did not reset state") + + // check state of array + assert.Equal(t, 2, a.NullN(), "unexpected null count") + assert.Equal(t, []{{or .QualifiedType .Type}}{1, 2, 3, 0, 5, 6, 0, 8, 9, 10}, a.{{.Name}}Values(), "unexpected {{.Name}}Values") + assert.Equal(t, []byte{0xb7}, a.NullBitmapBytes()[:1]) // 4 bytes due to minBuilderCapacity + assert.Len(t, a.{{.Name}}Values(), 10, "unexpected length of {{.Name}}Values") + + a.Release() + + ab.Append(7) + ab.Append(8) + + a = ab.New{{.Name}}Array() + + assert.Equal(t, 0, a.NullN()) + assert.Equal(t, []{{or .QualifiedType .Type}}{7, 8}, a.{{.Name}}Values()) + assert.Len(t, a.{{.Name}}Values(), 2) + + a.Release() + + var ( + want = []{{or .QualifiedType .Type}}{1, 2, 3, 4} + valids = []bool{true, true, false, true} + ) + + ab.AppendValues(want, valids) + a = ab.New{{.Name}}Array() + + sub := array.MakeFromData(a.Data()) + defer sub.Release() + + if got, want := sub.DataType().ID(), a.DataType().ID(); got != want { + t.Fatalf("invalid type: got=%q, want=%q", got, want) + } + + if _, ok := sub.(*array.{{.Name}}); !ok { + t.Fatalf("could not type-assert to array.{{.Name}}") + } + + if got, want := a.String(), `[1 2 (null) 4]`; got != want { + t.Fatalf("got=%q, want=%q", got, want) + } + + slice := array.NewSliceData(a.Data(), 2, 4) + defer slice.Release() + + sub1 := array.MakeFromData(slice) + defer sub1.Release() + + v, ok := sub1.(*array.{{.Name}}) + if !ok { + t.Fatalf("could not type-assert to array.{{.Name}}") + } + + if got, want := v.String(), `[(null) 4]`; got != want { + t.Fatalf("got=%q, want=%q", got, want) + } + + a.Release() +} + +func Test{{.Name}}Builder_AppendValues(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} + dtype := &arrow.{{.Name}}Type{Unit: arrow.Second} + ab := array.New{{.Name}}Builder(mem, dtype) +{{else}} + ab := array.New{{.Name}}Builder(mem) +{{end -}} + defer ab.Release() + + exp := []{{or .QualifiedType .Type}}{0, 1, 2, 3} + ab.AppendValues(exp, nil) + a := ab.New{{.Name}}Array() + assert.Equal(t, exp, a.{{.Name}}Values()) + + a.Release() +} + +func Test{{.Name}}Builder_Empty(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer 
mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} + dtype := &arrow.{{.Name}}Type{Unit: arrow.Second} + ab := array.New{{.Name}}Builder(mem, dtype) +{{else}} + ab := array.New{{.Name}}Builder(mem) +{{end -}} + defer ab.Release() + + exp := []{{or .QualifiedType .Type}}{0, 1, 2, 3} + + ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil) + a := ab.New{{.Name}}Array() + assert.Zero(t, a.Len()) + a.Release() + + ab.AppendValues(nil, nil) + a = ab.New{{.Name}}Array() + assert.Zero(t, a.Len()) + a.Release() + + ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil) + ab.AppendValues(exp, nil) + a = ab.New{{.Name}}Array() + assert.Equal(t, exp, a.{{.Name}}Values()) + a.Release() + + ab.AppendValues(exp, nil) + ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil) + a = ab.New{{.Name}}Array() + assert.Equal(t, exp, a.{{.Name}}Values()) + a.Release() +} + +func Test{{.Name}}Builder_Resize(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} + dtype := &arrow.{{.Name}}Type{Unit: arrow.Second} + ab := array.New{{.Name}}Builder(mem, dtype) +{{else}} + ab := array.New{{.Name}}Builder(mem) +{{end -}} + defer ab.Release() + + assert.Equal(t, 0, ab.Cap()) + assert.Equal(t, 0, ab.Len()) + + ab.Reserve(63) + assert.Equal(t, 64, ab.Cap()) + assert.Equal(t, 0, ab.Len()) + + for i := 0; i < 63; i++ { + ab.Append(0) + } + assert.Equal(t, 64, ab.Cap()) + assert.Equal(t, 63, ab.Len()) + + ab.Resize(5) + assert.Equal(t, 5, ab.Len()) + + ab.Resize(32) + assert.Equal(t, 5, ab.Len()) +} +{{end}} + + diff --git a/vendor/github.com/apache/arrow/go/arrow/array/record.go b/vendor/github.com/apache/arrow/go/arrow/array/record.go new file mode 100644 index 000000000000..ab7961d55546 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/record.go @@ -0,0 +1,345 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// RecordReader reads a stream of records. +type RecordReader interface { + Retain() + Release() + + Schema() *arrow.Schema + + Next() bool + Record() Record +} + +// simpleRecords is a simple iterator over a collection of records. +type simpleRecords struct { + refCount int64 + + schema *arrow.Schema + recs []Record + cur Record +} + +// NewRecordReader returns a simple iterator over the given slice of records. 
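+//
+// A minimal iteration sketch (illustrative; assumes schema and recs were
+// built elsewhere):
+//
+//	rr, err := array.NewRecordReader(schema, recs)
+//	if err != nil {
+//		// every record's schema must match the given schema
+//	}
+//	defer rr.Release()
+//	for rr.Next() {
+//		rec := rr.Record() // owned by rr; valid until the next call to Next
+//	}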
+func NewRecordReader(schema *arrow.Schema, recs []Record) (*simpleRecords, error) { + rs := &simpleRecords{ + refCount: 1, + schema: schema, + recs: recs, + cur: nil, + } + + for _, rec := range rs.recs { + rec.Retain() + } + + for _, rec := range recs { + if !rec.Schema().Equal(rs.schema) { + rs.Release() + return nil, fmt.Errorf("arrow/array: mismatch schema") + } + } + + return rs, nil +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (rs *simpleRecords) Retain() { + atomic.AddInt64(&rs.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (rs *simpleRecords) Release() { + debug.Assert(atomic.LoadInt64(&rs.refCount) > 0, "too many releases") + + if atomic.AddInt64(&rs.refCount, -1) == 0 { + if rs.cur != nil { + rs.cur.Release() + } + for _, rec := range rs.recs { + rec.Release() + } + rs.recs = nil + } +} + +func (rs *simpleRecords) Schema() *arrow.Schema { return rs.schema } +func (rs *simpleRecords) Record() Record { return rs.cur } +func (rs *simpleRecords) Next() bool { + if len(rs.recs) == 0 { + return false + } + if rs.cur != nil { + rs.cur.Release() + } + rs.cur = rs.recs[0] + rs.recs = rs.recs[1:] + return true +} + +// Record is a collection of equal-length arrays +// matching a particular Schema. +type Record interface { + Release() + Retain() + + Schema() *arrow.Schema + + NumRows() int64 + NumCols() int64 + + Columns() []Interface + Column(i int) Interface + ColumnName(i int) string + + // NewSlice constructs a zero-copy slice of the record with the indicated + // indices i and j, corresponding to array[i:j]. + // The returned record must be Release()'d after use. + // + // NewSlice panics if the slice is outside the valid range of the record array. + // NewSlice panics if j < i. + NewSlice(i, j int64) Record +} + +// simpleRecord is a basic, non-lazy in-memory record batch. +type simpleRecord struct { + refCount int64 + + schema *arrow.Schema + + rows int64 + arrs []Interface +} + +// NewRecord returns a basic, non-lazy in-memory record batch. +// +// NewRecord panics if the columns and schema are inconsistent. +// NewRecord panics if rows is larger than the height of the columns. +func NewRecord(schema *arrow.Schema, cols []Interface, nrows int64) *simpleRecord { + rec := &simpleRecord{ + refCount: 1, + schema: schema, + rows: nrows, + arrs: make([]Interface, len(cols)), + } + copy(rec.arrs, cols) + for _, arr := range rec.arrs { + arr.Retain() + } + + if rec.rows < 0 { + switch len(rec.arrs) { + case 0: + rec.rows = 0 + default: + rec.rows = int64(rec.arrs[0].Len()) + } + } + + err := rec.validate() + if err != nil { + rec.Release() + panic(err) + } + + return rec +} + +func (rec *simpleRecord) validate() error { + if len(rec.arrs) != len(rec.schema.Fields()) { + return fmt.Errorf("arrow/array: number of columns/fields mismatch") + } + + for i, arr := range rec.arrs { + f := rec.schema.Field(i) + if int64(arr.Len()) < rec.rows { + return fmt.Errorf("arrow/array: mismatch number of rows in column %q: got=%d, want=%d", + f.Name, + arr.Len(), rec.rows, + ) + } + if !arrow.TypeEqual(f.Type, arr.DataType()) { + return fmt.Errorf("arrow/array: column %q type mismatch: got=%v, want=%v", + f.Name, + arr.DataType(), f.Type, + ) + } + } + return nil +} + +// Retain increases the reference count by 1. 
+// Retain may be called simultaneously from multiple goroutines.
+func (rec *simpleRecord) Retain() {
+	atomic.AddInt64(&rec.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (rec *simpleRecord) Release() {
+	debug.Assert(atomic.LoadInt64(&rec.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&rec.refCount, -1) == 0 {
+		for _, arr := range rec.arrs {
+			arr.Release()
+		}
+		rec.arrs = nil
+	}
+}
+
+func (rec *simpleRecord) Schema() *arrow.Schema  { return rec.schema }
+func (rec *simpleRecord) NumRows() int64         { return rec.rows }
+func (rec *simpleRecord) NumCols() int64         { return int64(len(rec.arrs)) }
+func (rec *simpleRecord) Columns() []Interface   { return rec.arrs }
+func (rec *simpleRecord) Column(i int) Interface { return rec.arrs[i] }
+func (rec *simpleRecord) ColumnName(i int) string { return rec.schema.Field(i).Name }
+
+// NewSlice constructs a zero-copy slice of the record with the indicated
+// indices i and j, corresponding to array[i:j].
+// The returned record must be Release()'d after use.
+//
+// NewSlice panics if the slice is outside the valid range of the record array.
+// NewSlice panics if j < i.
+func (rec *simpleRecord) NewSlice(i, j int64) Record {
+	arrs := make([]Interface, len(rec.arrs))
+	for ii, arr := range rec.arrs {
+		arrs[ii] = NewSlice(arr, i, j)
+	}
+	defer func() {
+		for _, arr := range arrs {
+			arr.Release()
+		}
+	}()
+	return NewRecord(rec.schema, arrs, j-i)
+}
+
+func (rec *simpleRecord) String() string {
+	o := new(strings.Builder)
+	fmt.Fprintf(o, "record:\n %v\n", rec.schema)
+	fmt.Fprintf(o, " rows: %d\n", rec.rows)
+	for i, col := range rec.arrs {
+		fmt.Fprintf(o, " col[%d][%s]: %v\n", i, rec.schema.Field(i).Name, col)
+	}
+
+	return o.String()
+}
+
+// RecordBuilder eases the process of building a Record, iteratively, from
+// a known Schema.
+type RecordBuilder struct {
+	refCount int64
+	mem      memory.Allocator
+	schema   *arrow.Schema
+	fields   []Builder
+}
+
+// NewRecordBuilder returns a builder, using the provided memory allocator and a schema.
+func NewRecordBuilder(mem memory.Allocator, schema *arrow.Schema) *RecordBuilder {
+	b := &RecordBuilder{
+		refCount: 1,
+		mem:      mem,
+		schema:   schema,
+		fields:   make([]Builder, len(schema.Fields())),
+	}
+
+	for i, f := range schema.Fields() {
+		b.fields[i] = newBuilder(b.mem, f.Type)
+	}
+
+	return b
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (b *RecordBuilder) Retain() {
+	atomic.AddInt64(&b.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+func (b *RecordBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		for _, f := range b.fields {
+			f.Release()
+		}
+		b.fields = nil
+	}
+}
+
+func (b *RecordBuilder) Schema() *arrow.Schema { return b.schema }
+func (b *RecordBuilder) Fields() []Builder     { return b.fields }
+func (b *RecordBuilder) Field(i int) Builder   { return b.fields[i] }
+
+func (b *RecordBuilder) Reserve(size int) {
+	for _, f := range b.fields {
+		f.Reserve(size)
+	}
+}
+
+// NewRecord creates a new record from the memory buffers and resets the
+// RecordBuilder so it can be used to build a new record.
+//
+// The returned Record must be Release()'d after use.
+//
+// NewRecord panics if the fields' builders do not have the same length.
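+//
+// A minimal build sketch (illustrative; assumes a schema whose field 0 is an
+// arrow.PrimitiveTypes.Int64 field):
+//
+//	bld := array.NewRecordBuilder(mem, schema)
+//	defer bld.Release()
+//	bld.Field(0).(*array.Int64Builder).AppendValues([]int64{1, 2, 3}, nil)
+//	rec := bld.NewRecord()
+//	defer rec.Release()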
+func (b *RecordBuilder) NewRecord() Record {
+	cols := make([]Interface, len(b.fields))
+	rows := int64(0)
+
+	defer func(cols []Interface) {
+		for _, col := range cols {
+			if col == nil {
+				continue
+			}
+			col.Release()
+		}
+	}(cols)
+
+	for i, f := range b.fields {
+		cols[i] = f.NewArray()
+		irow := int64(cols[i].Len())
+		if i > 0 && irow != rows {
+			panic(fmt.Errorf("arrow/array: field %d has %d rows. want=%d", i, irow, rows))
+		}
+		rows = irow
+	}
+
+	return NewRecord(b.schema, cols, rows)
+}
+
+var (
+	_ Record       = (*simpleRecord)(nil)
+	_ RecordReader = (*simpleRecords)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/arrow/array/string.go b/vendor/github.com/apache/arrow/go/arrow/array/string.go
new file mode 100644
index 000000000000..42e87d8dfa48
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/array/string.go
@@ -0,0 +1,205 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"unsafe"
+
+	"github.com/apache/arrow/go/arrow"
+	"github.com/apache/arrow/go/arrow/memory"
+)
+
+const (
+	stringArrayMaximumCapacity = math.MaxInt32
+)
+
+// String represents an immutable sequence of variable-length UTF-8 strings.
+type String struct {
+	array
+	offsets []int32
+	values  string
+}
+
+// NewStringData constructs a new String array from data.
+func NewStringData(data *Data) *String {
+	a := &String{}
+	a.refCount = 1
+	a.setData(data)
+	return a
+}
+
+// Reset resets the String with a different set of Data.
+func (a *String) Reset(data *Data) {
+	a.setData(data)
+}
+
+// Value returns the string at index i.
+func (a *String) Value(i int) string {
+	i = i + a.array.data.offset
+	return a.values[a.offsets[i]:a.offsets[i+1]]
+}
+
+// ValueOffset returns the offset of the value at index i.
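+// Together with ValueOffset(i+1), it delimits the bytes of Value(i) in the
+// shared values buffer; for an unsliced array,
+// Value(i) == values[ValueOffset(i):ValueOffset(i+1)].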
+func (a *String) ValueOffset(i int) int { return int(a.offsets[i]) }
+
+func (a *String) String() string {
+	o := new(strings.Builder)
+	o.WriteString("[")
+	for i := 0; i < a.Len(); i++ {
+		if i > 0 {
+			o.WriteString(" ")
+		}
+		switch {
+		case a.IsNull(i):
+			o.WriteString("(null)")
+		default:
+			fmt.Fprintf(o, "%q", a.Value(i))
+		}
+	}
+	o.WriteString("]")
+	return o.String()
+}
+
+func (a *String) setData(data *Data) {
+	if len(data.buffers) != 3 {
+		panic("arrow/array: len(data.buffers) != 3")
+	}
+
+	a.array.setData(data)
+
+	if vdata := data.buffers[2]; vdata != nil {
+		b := vdata.Bytes()
+		a.values = *(*string)(unsafe.Pointer(&b))
+	}
+
+	if offsets := data.buffers[1]; offsets != nil {
+		a.offsets = arrow.Int32Traits.CastFromBytes(offsets.Bytes())
+	}
+}
+
+func arrayEqualString(left, right *String) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if left.Value(i) != right.Value(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// A StringBuilder is used to build a String array using the Append methods.
+type StringBuilder struct {
+	builder *BinaryBuilder
+}
+
+// NewStringBuilder creates a new StringBuilder.
+func NewStringBuilder(mem memory.Allocator) *StringBuilder {
+	b := &StringBuilder{
+		builder: NewBinaryBuilder(mem, arrow.BinaryTypes.String),
+	}
+	return b
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (b *StringBuilder) Release() {
+	b.builder.Release()
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (b *StringBuilder) Retain() {
+	b.builder.Retain()
+}
+
+// Len returns the number of elements in the array builder.
+func (b *StringBuilder) Len() int { return b.builder.Len() }
+
+// Cap returns the total number of elements that can be stored without allocating additional memory.
+func (b *StringBuilder) Cap() int { return b.builder.Cap() }
+
+// NullN returns the number of null values in the array builder.
+func (b *StringBuilder) NullN() int { return b.builder.NullN() }
+
+// Append appends a string to the builder.
+func (b *StringBuilder) Append(v string) {
+	b.builder.Append([]byte(v))
+}
+
+// AppendNull appends a null to the builder.
+func (b *StringBuilder) AppendNull() {
+	b.builder.AppendNull()
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *StringBuilder) AppendValues(v []string, valid []bool) {
+	b.builder.AppendStringValues(v, valid)
+}
+
+// Value returns the string at index i.
+func (b *StringBuilder) Value(i int) string {
+	return string(b.builder.Value(i))
+}
+
+func (b *StringBuilder) init(capacity int) {
+	b.builder.init(capacity)
+}
+
+func (b *StringBuilder) resize(newBits int, init func(int)) {
+	b.builder.resize(newBits, init)
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *StringBuilder) Reserve(n int) {
+	b.builder.Reserve(n)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *StringBuilder) Resize(n int) { + b.builder.Resize(n) +} + +// NewArray creates a String array from the memory buffers used by the builder and resets the StringBuilder +// so it can be used to build a new array. +func (b *StringBuilder) NewArray() Interface { + return b.NewStringArray() +} + +// NewStringArray creates a String array from the memory buffers used by the builder and resets the StringBuilder +// so it can be used to build a new array. +func (b *StringBuilder) NewStringArray() (a *String) { + data := b.builder.newData() + a = NewStringData(data) + data.Release() + return +} + +var ( + _ Interface = (*String)(nil) + _ Builder = (*StringBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/struct.go b/vendor/github.com/apache/arrow/go/arrow/array/struct.go new file mode 100644 index 000000000000..a718de03b903 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/struct.go @@ -0,0 +1,278 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/memory" +) + +// Struct represents an ordered sequence of relative types. +type Struct struct { + array + fields []Interface +} + +// NewStructData returns a new Struct array value from data. +func NewStructData(data *Data) *Struct { + a := &Struct{} + a.refCount = 1 + a.setData(data) + return a +} + +func (a *Struct) NumField() int { return len(a.fields) } +func (a *Struct) Field(i int) Interface { return a.fields[i] } + +func (a *Struct) String() string { + o := new(strings.Builder) + o.WriteString("{") + + structBitmap := a.NullBitmapBytes() + for i, v := range a.fields { + if i > 0 { + o.WriteString(" ") + } + if !bytes.Equal(structBitmap, v.NullBitmapBytes()) { + masked := a.newStructFieldWithParentValidityMask(i) + fmt.Fprintf(o, "%v", masked) + masked.Release() + continue + } + fmt.Fprintf(o, "%v", v) + } + o.WriteString("}") + return o.String() +} + +// newStructFieldWithParentValidityMask returns the Interface at fieldIndex +// with a nullBitmapBytes adjusted according on the parent struct nullBitmapBytes. +// From the docs: +// "When reading the struct array the parent validity bitmap takes priority." 
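+// Concretely: if the struct is null at row i, the returned child array
+// reports null at row i as well, even when the child's own bitmap marks
+// the slot valid.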
+func (a *Struct) newStructFieldWithParentValidityMask(fieldIndex int) Interface { + field := a.Field(fieldIndex) + nullBitmapBytes := field.NullBitmapBytes() + maskedNullBitmapBytes := make([]byte, len(nullBitmapBytes)) + copy(maskedNullBitmapBytes, nullBitmapBytes) + for i := 0; i < field.Len(); i++ { + if !a.IsValid(i) { + bitutil.ClearBit(maskedNullBitmapBytes, i) + } + } + data := NewSliceData(field.Data(), 0, int64(field.Len())) + defer data.Release() + bufs := make([]*memory.Buffer, len(data.buffers)) + copy(bufs, data.buffers) + bufs[0].Release() + bufs[0] = memory.NewBufferBytes(maskedNullBitmapBytes) + data.buffers = bufs + maskedField := MakeFromData(data) + return maskedField +} + +func (a *Struct) setData(data *Data) { + a.array.setData(data) + a.fields = make([]Interface, len(data.childData)) + for i, child := range data.childData { + if data.offset != 0 || child.length != data.length { + sub := NewSliceData(child, int64(data.offset), int64(data.offset+data.length)) + a.fields[i] = MakeFromData(sub) + sub.Release() + } else { + a.fields[i] = MakeFromData(child) + } + } +} + +func arrayEqualStruct(left, right *Struct) bool { + for i, lf := range left.fields { + rf := right.fields[i] + if !ArrayEqual(lf, rf) { + return false + } + } + return true +} + +func (a *Struct) Retain() { + a.array.Retain() + for _, f := range a.fields { + f.Retain() + } +} + +func (a *Struct) Release() { + a.array.Release() + for _, f := range a.fields { + f.Release() + } +} + +type StructBuilder struct { + builder + + dtype arrow.DataType + fields []Builder +} + +// NewStructBuilder returns a builder, using the provided memory allocator. +func NewStructBuilder(mem memory.Allocator, dtype *arrow.StructType) *StructBuilder { + b := &StructBuilder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + fields: make([]Builder, len(dtype.Fields())), + } + for i, f := range dtype.Fields() { + b.fields[i] = newBuilder(b.mem, f.Type) + } + return b +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *StructBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + } + + for _, f := range b.fields { + f.Release() + } +} + +func (b *StructBuilder) Append(v bool) { + b.Reserve(1) + b.unsafeAppendBoolToBitmap(v) + if !v { + for _, f := range b.fields { + f.AppendNull() + } + } +} + +func (b *StructBuilder) AppendValues(valids []bool) { + b.Reserve(len(valids)) + b.builder.unsafeAppendBoolsToBitmap(valids, len(valids)) +} + +func (b *StructBuilder) AppendNull() { b.Append(false) } + +func (b *StructBuilder) unsafeAppend(v bool) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.length++ +} + +func (b *StructBuilder) unsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +func (b *StructBuilder) init(capacity int) { + b.builder.init(capacity) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *StructBuilder) Reserve(n int) { + b.builder.reserve(n, b.resizeHelper) + for _, f := range b.fields { + f.Reserve(n) + } +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. 
If n is smaller, the allocated memory may be reduced.
+func (b *StructBuilder) Resize(n int) {
+	b.resizeHelper(n)
+	for _, f := range b.fields {
+		f.Resize(n)
+	}
+}
+
+func (b *StructBuilder) resizeHelper(n int) {
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(n, b.builder.init)
+	}
+}
+
+func (b *StructBuilder) NumField() int              { return len(b.fields) }
+func (b *StructBuilder) FieldBuilder(i int) Builder { return b.fields[i] }
+
+// NewArray creates a Struct array from the memory buffers used by the builder and resets the StructBuilder
+// so it can be used to build a new array.
+func (b *StructBuilder) NewArray() Interface {
+	return b.NewStructArray()
+}
+
+// NewStructArray creates a Struct array from the memory buffers used by the builder and resets the StructBuilder
+// so it can be used to build a new array.
+func (b *StructBuilder) NewStructArray() (a *Struct) {
+	data := b.newData()
+	a = NewStructData(data)
+	data.Release()
+	return
+}
+
+func (b *StructBuilder) newData() (data *Data) {
+	fields := make([]*Data, len(b.fields))
+	for i, f := range b.fields {
+		arr := f.NewArray()
+		defer arr.Release()
+		fields[i] = arr.Data()
+	}
+
+	data = NewData(
+		b.dtype, b.length,
+		[]*memory.Buffer{
+			b.nullBitmap,
+			nil, // FIXME(sbinet)
+		},
+		fields,
+		b.nulls,
+		0,
+	)
+	b.reset()
+
+	return
+}
+
+var (
+	_ Interface = (*Struct)(nil)
+	_ Builder   = (*StructBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/arrow/array/table.go b/vendor/github.com/apache/arrow/go/arrow/array/table.go
new file mode 100644
index 000000000000..ded6d11ebbc0
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/array/table.go
@@ -0,0 +1,455 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sync/atomic"
+
+	"github.com/apache/arrow/go/arrow"
+	"github.com/apache/arrow/go/arrow/internal/debug"
+)
+
+// Table represents a logical sequence of chunked arrays.
+type Table interface {
+	Schema() *arrow.Schema
+	NumRows() int64
+	NumCols() int64
+	Column(i int) *Column
+
+	Retain()
+	Release()
+}
+
+// Column is an immutable column data structure consisting of
+// a field (type metadata) and a chunked data array.
+type Column struct {
+	field arrow.Field
+	data  *Chunked
+}
+
+// NewColumn returns a column from a field and a chunked data array.
+//
+// NewColumn panics if the field's data type is inconsistent with the data type
+// of the chunked data array.
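+//
+// A minimal construction sketch (illustrative; assumes arr is an existing
+// array.Interface):
+//
+//	chunked := array.NewChunked(arr.DataType(), []array.Interface{arr})
+//	col := array.NewColumn(arrow.Field{Name: "c0", Type: arr.DataType()}, chunked)
+//	chunked.Release() // col retains its own reference
+//	defer col.Release()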
+func NewColumn(field arrow.Field, chunks *Chunked) *Column { + col := Column{ + field: field, + data: chunks, + } + col.data.Retain() + + if !arrow.TypeEqual(col.data.DataType(), col.field.Type) { + col.data.Release() + panic("arrow/array: inconsistent data type") + } + + return &col +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (col *Column) Retain() { + col.data.Retain() +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (col *Column) Release() { + col.data.Release() +} + +func (col *Column) Len() int { return col.data.Len() } +func (col *Column) NullN() int { return col.data.NullN() } +func (col *Column) Data() *Chunked { return col.data } +func (col *Column) Field() arrow.Field { return col.field } +func (col *Column) Name() string { return col.field.Name } +func (col *Column) DataType() arrow.DataType { return col.field.Type } + +// NewSlice returns a new zero-copy slice of the column with the indicated +// indices i and j, corresponding to the column's array[i:j]. +// The returned column must be Release()'d after use. +// +// NewSlice panics if the slice is outside the valid range of the column's array. +// NewSlice panics if j < i. +func (col *Column) NewSlice(i, j int64) *Column { + return &Column{ + field: col.field, + data: col.data.NewSlice(i, j), + } +} + +// Chunked manages a collection of primitives arrays as one logical large array. +type Chunked struct { + chunks []Interface + + refCount int64 + + length int + nulls int + dtype arrow.DataType +} + +// NewChunked returns a new chunked array from the slice of arrays. +// +// NewChunked panics if the chunks do not have the same data type. +func NewChunked(dtype arrow.DataType, chunks []Interface) *Chunked { + arr := &Chunked{ + chunks: make([]Interface, len(chunks)), + refCount: 1, + dtype: dtype, + } + for i, chunk := range chunks { + if !arrow.TypeEqual(chunk.DataType(), dtype) { + panic("arrow/array: mismatch data type") + } + chunk.Retain() + arr.chunks[i] = chunk + arr.length += chunk.Len() + arr.nulls += chunk.NullN() + } + return arr +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (a *Chunked) Retain() { + atomic.AddInt64(&a.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (a *Chunked) Release() { + debug.Assert(atomic.LoadInt64(&a.refCount) > 0, "too many releases") + + if atomic.AddInt64(&a.refCount, -1) == 0 { + for _, arr := range a.chunks { + arr.Release() + } + a.chunks = nil + a.length = 0 + a.nulls = 0 + } +} + +func (a *Chunked) Len() int { return a.length } +func (a *Chunked) NullN() int { return a.nulls } +func (a *Chunked) DataType() arrow.DataType { return a.dtype } +func (a *Chunked) Chunks() []Interface { return a.chunks } +func (a *Chunked) Chunk(i int) Interface { return a.chunks[i] } + +// NewSlice constructs a zero-copy slice of the chunked array with the indicated +// indices i and j, corresponding to array[i:j]. +// The returned chunked array must be Release()'d after use. +// +// NewSlice panics if the slice is outside the valid range of the input array. +// NewSlice panics if j < i. 
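+//
+// For example, given chunks of lengths 3 and 4, NewSlice(2, 5) returns a
+// chunked array of length 3 whose values span the boundary between the two
+// chunks.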
+func (a *Chunked) NewSlice(i, j int64) *Chunked { + if j > int64(a.length) || i > j || i > int64(a.length) { + panic("arrow/array: index out of range") + } + + var ( + cur = 0 + beg = i + sz = j - i + chunks = make([]Interface, 0, len(a.chunks)) + ) + + for cur < len(a.chunks) && beg >= int64(a.chunks[cur].Len()) { + beg -= int64(a.chunks[cur].Len()) + cur++ + } + + for cur < len(a.chunks) && sz > 0 { + arr := a.chunks[cur] + end := beg + sz + if end > int64(arr.Len()) { + end = int64(arr.Len()) + } + chunks = append(chunks, NewSlice(arr, beg, end)) + sz -= int64(arr.Len()) - beg + beg = 0 + cur++ + } + chunks = chunks[:len(chunks):len(chunks)] + defer func() { + for _, chunk := range chunks { + chunk.Release() + } + }() + + return NewChunked(a.dtype, chunks) +} + +// simpleTable is a basic, non-lazy in-memory table. +type simpleTable struct { + refCount int64 + + rows int64 + cols []Column + + schema *arrow.Schema +} + +// NewTable returns a new basic, non-lazy in-memory table. +// If rows is negative, the number of rows will be inferred from the height +// of the columns. +// +// NewTable panics if the columns and schema are inconsistent. +// NewTable panics if rows is larger than the height of the columns. +func NewTable(schema *arrow.Schema, cols []Column, rows int64) *simpleTable { + tbl := simpleTable{ + refCount: 1, + rows: rows, + cols: cols, + schema: schema, + } + + if tbl.rows < 0 { + switch len(tbl.cols) { + case 0: + tbl.rows = 0 + default: + tbl.rows = int64(tbl.cols[0].Len()) + } + } + + // validate the table and its constituents. + // note we retain the columns after having validated the table + // in case the validation fails and panics (and would otherwise leak + // a ref-count on the columns.) + tbl.validate() + + for i := range tbl.cols { + tbl.cols[i].Retain() + } + + return &tbl +} + +// NewTableFromRecords returns a new basic, non-lazy in-memory table. +// +// NewTableFromRecords panics if the records and schema are inconsistent. +func NewTableFromRecords(schema *arrow.Schema, recs []Record) *simpleTable { + arrs := make([]Interface, len(recs)) + cols := make([]Column, len(schema.Fields())) + + defer func(cols []Column) { + for i := range cols { + cols[i].Release() + } + }(cols) + + for i := range cols { + field := schema.Field(i) + for j, rec := range recs { + arrs[j] = rec.Column(i) + } + chunk := NewChunked(field.Type, arrs) + cols[i] = *NewColumn(field, chunk) + chunk.Release() + } + + return NewTable(schema, cols, -1) +} + +func (tbl *simpleTable) Schema() *arrow.Schema { return tbl.schema } +func (tbl *simpleTable) NumRows() int64 { return tbl.rows } +func (tbl *simpleTable) NumCols() int64 { return int64(len(tbl.cols)) } +func (tbl *simpleTable) Column(i int) *Column { return &tbl.cols[i] } + +func (tbl *simpleTable) validate() { + if len(tbl.cols) != len(tbl.schema.Fields()) { + panic(errors.New("arrow/array: table schema mismatch")) + } + for i, col := range tbl.cols { + if !col.field.Equal(tbl.schema.Field(i)) { + panic(fmt.Errorf("arrow/array: column field %q is inconsistent with schema", col.Name())) + } + + if int64(col.Len()) < tbl.rows { + panic(fmt.Errorf("arrow/array: column %q expected length >= %d but got length %d", col.Name(), tbl.rows, col.Len())) + } + } +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (tbl *simpleTable) Retain() { + atomic.AddInt64(&tbl.refCount, 1) +} + +// Release decreases the reference count by 1. 
+// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (tbl *simpleTable) Release() { + debug.Assert(atomic.LoadInt64(&tbl.refCount) > 0, "too many releases") + + if atomic.AddInt64(&tbl.refCount, -1) == 0 { + for i := range tbl.cols { + tbl.cols[i].Release() + } + tbl.cols = nil + } +} + +// TableReader is a Record iterator over a (possibly chunked) Table +type TableReader struct { + refCount int64 + + tbl Table + cur int64 // current row + max int64 // total number of rows + rec Record // current Record + chksz int64 // chunk size + + chunks []*Chunked + slots []int // chunk indices + offsets []int64 // chunk offsets +} + +// NewTableReader returns a new TableReader to iterate over the (possibly chunked) Table. +// if chunkSize is <= 0, the biggest possible chunk will be selected. +func NewTableReader(tbl Table, chunkSize int64) *TableReader { + ncols := tbl.NumCols() + tr := &TableReader{ + refCount: 1, + tbl: tbl, + cur: 0, + max: int64(tbl.NumRows()), + chksz: chunkSize, + chunks: make([]*Chunked, ncols), + slots: make([]int, ncols), + offsets: make([]int64, ncols), + } + tr.tbl.Retain() + + if tr.chksz <= 0 { + tr.chksz = math.MaxInt64 + } + + for i := range tr.chunks { + col := tr.tbl.Column(i) + tr.chunks[i] = col.Data() + tr.chunks[i].Retain() + } + return tr +} + +func (tr *TableReader) Schema() *arrow.Schema { return tr.tbl.Schema() } +func (tr *TableReader) Record() Record { return tr.rec } + +func (tr *TableReader) Next() bool { + if tr.cur >= tr.max { + return false + } + + if tr.rec != nil { + tr.rec.Release() + } + + // determine the minimum contiguous slice across all columns + chunksz := imin64(tr.max, tr.chksz) + chunks := make([]Interface, len(tr.chunks)) + for i := range chunks { + j := tr.slots[i] + chunk := tr.chunks[i].Chunk(j) + remain := int64(chunk.Len()) - tr.offsets[i] + if remain < chunksz { + chunksz = remain + } + + chunks[i] = chunk + } + + // slice the chunks, advance each chunk slot as appropriate. + batch := make([]Interface, len(tr.chunks)) + for i, chunk := range chunks { + var slice Interface + offset := tr.offsets[i] + switch int64(chunk.Len()) - offset { + case chunksz: + tr.slots[i]++ + tr.offsets[i] = 0 + if offset > 0 { + // need to slice + slice = NewSlice(chunk, offset, offset+chunksz) + } else { + // no need to slice + slice = chunk + slice.Retain() + } + default: + tr.offsets[i] += chunksz + slice = NewSlice(chunk, offset, offset+chunksz) + } + batch[i] = slice + } + + tr.cur += chunksz + tr.rec = NewRecord(tr.tbl.Schema(), batch, chunksz) + + for _, arr := range batch { + arr.Release() + } + + return true +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (tr *TableReader) Retain() { + atomic.AddInt64(&tr.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. 
+func (tr *TableReader) Release() { + debug.Assert(atomic.LoadInt64(&tr.refCount) > 0, "too many releases") + + if atomic.AddInt64(&tr.refCount, -1) == 0 { + tr.tbl.Release() + for _, chk := range tr.chunks { + chk.Release() + } + if tr.rec != nil { + tr.rec.Release() + } + tr.tbl = nil + tr.chunks = nil + tr.slots = nil + tr.offsets = nil + } +} + +func imin64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +var ( + _ Table = (*simpleTable)(nil) + _ RecordReader = (*TableReader)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/array/util.go b/vendor/github.com/apache/arrow/go/arrow/array/util.go new file mode 100644 index 000000000000..c8d7e17c2bd8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/array/util.go @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/apache/arrow/go/arrow/arrio/arrio.go b/vendor/github.com/apache/arrow/go/arrow/arrio/arrio.go new file mode 100644 index 000000000000..60cfb78b106f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/arrio/arrio.go @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package arrio exposes functions to manipulate records, exposing and using +// interfaces not unlike the ones defined in the stdlib io package. +package arrio // import "github.com/apache/arrow/go/arrow/arrio" + +import ( + "io" + + "github.com/apache/arrow/go/arrow/array" +) + +// Reader is the interface that wraps the Read method. +type Reader interface { + // Read reads the current record from the underlying stream and an error, if any. + // When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF). + Read() (array.Record, error) +} + +// ReaderAt is the interface that wraps the ReadAt method. +type ReaderAt interface { + // ReadAt reads the i-th record from the underlying stream and an error, if any. 
+ ReadAt(i int64) (array.Record, error) +} + +// Writer is the interface that wraps the Write method. +type Writer interface { + Write(rec array.Record) error +} + +// Copy copies all the records available from src to dst. +// Copy returns the number of records copied and the first error +// encountered while copying, if any. +// +// A successful Copy returns err == nil, not err == EOF. Because Copy is +// defined to read from src until EOF, it does not treat an EOF from Read as an +// error to be reported. +func Copy(dst Writer, src Reader) (n int64, err error) { + for { + rec, err := src.Read() + if err != nil { + if err == io.EOF { + return n, nil + } + return n, err + } + err = dst.Write(rec) + if err != nil { + return n, err + } + n++ + } +} + +// CopyN copies n records (or until an error) from src to dst. It returns the +// number of records copied and the earliest error encountered while copying. On +// return, written == n if and only if err == nil. +func CopyN(dst Writer, src Reader, n int64) (written int64, err error) { + for ; written < n; written++ { + rec, err := src.Read() + if err != nil { + if err == io.EOF && written == n { + return written, nil + } + return written, err + } + err = dst.Write(rec) + if err != nil { + return written, err + } + } + + if written != n && err == nil { + err = io.EOF + } + return written, err +} diff --git a/vendor/github.com/apache/arrow/go/arrow/bitutil/bitutil.go b/vendor/github.com/apache/arrow/go/arrow/bitutil/bitutil.go new file mode 100644 index 000000000000..b547d618a5a8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/bitutil/bitutil.go @@ -0,0 +1,156 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bitutil + +import ( + "math/bits" + "reflect" + "unsafe" +) + +var ( + BitMask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128} + FlippedBitMask = [8]byte{254, 253, 251, 247, 239, 223, 191, 127} +) + +// IsMultipleOf8 returns whether v is a multiple of 8. +func IsMultipleOf8(v int64) bool { return v&7 == 0 } + +func BytesForBits(bits int64) int64 { return (bits + 7) >> 3 } + +// NextPowerOf2 rounds x to the next power of two. +func NextPowerOf2(x int) int { return 1 << uint(bits.Len(uint(x))) } + +// CeilByte rounds size to the next multiple of 8. +func CeilByte(size int) int { return (size + 7) &^ 7 } + +// CeilByte64 rounds size to the next multiple of 8. +func CeilByte64(size int64) int64 { return (size + 7) &^ 7 } + +// BitIsSet returns true if the bit at index i in buf is set (1). +func BitIsSet(buf []byte, i int) bool { return (buf[uint(i)/8] & BitMask[byte(i)%8]) != 0 } + +// BitIsNotSet returns true if the bit at index i in buf is not set (0). 
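+// For example, with buf = []byte{0x05} (bits 0 and 2 set), BitIsNotSet(buf, 1)
+// is true and BitIsNotSet(buf, 2) is false.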
+func BitIsNotSet(buf []byte, i int) bool { return (buf[uint(i)/8] & BitMask[byte(i)%8]) == 0 } + +// SetBit sets the bit at index i in buf to 1. +func SetBit(buf []byte, i int) { buf[uint(i)/8] |= BitMask[byte(i)%8] } + +// ClearBit sets the bit at index i in buf to 0. +func ClearBit(buf []byte, i int) { buf[uint(i)/8] &= FlippedBitMask[byte(i)%8] } + +// SetBitTo sets the bit at index i in buf to val. +func SetBitTo(buf []byte, i int, val bool) { + if val { + SetBit(buf, i) + } else { + ClearBit(buf, i) + } +} + +// CountSetBits counts the number of 1's in buf up to n bits. +func CountSetBits(buf []byte, offset, n int) int { + if offset > 0 { + return countSetBitsWithOffset(buf, offset, n) + } + + count := 0 + + uint64Bytes := n / uint64SizeBits * 8 + for _, v := range bytesToUint64(buf[:uint64Bytes]) { + count += bits.OnesCount64(v) + } + + for _, v := range buf[uint64Bytes : n/8] { + count += bits.OnesCount8(v) + } + + // tail bits + for i := n &^ 0x7; i < n; i++ { + if BitIsSet(buf, i) { + count++ + } + } + + return count +} + +func countSetBitsWithOffset(buf []byte, offset, n int) int { + count := 0 + + beg := offset + end := offset + n + + begU8 := roundUp(beg, uint64SizeBits) + + init := min(n, begU8-beg) + for i := offset; i < beg+init; i++ { + if BitIsSet(buf, i) { + count++ + } + } + + nU64 := (n - init) / uint64SizeBits + begU64 := begU8 / uint64SizeBits + endU64 := begU64 + nU64 + bufU64 := bytesToUint64(buf) + if begU64 < len(bufU64) { + for _, v := range bufU64[begU64:endU64] { + count += bits.OnesCount64(v) + } + } + + // FIXME: use a fallback to bits.OnesCount8 + // before counting the tail bits. + + tail := beg + init + nU64*uint64SizeBits + for i := tail; i < end; i++ { + if BitIsSet(buf, i) { + count++ + } + } + + return count +} + +func roundUp(v, f int) int { + return (v + (f - 1)) / f * f +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +const ( + uint64SizeBytes = int(unsafe.Sizeof(uint64(0))) + uint64SizeBits = uint64SizeBytes * 8 +) + +func bytesToUint64(b []byte) []uint64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / uint64SizeBytes + s.Cap = h.Cap / uint64SizeBytes + + return res +} diff --git a/vendor/github.com/apache/arrow/go/arrow/compare.go b/vendor/github.com/apache/arrow/go/arrow/compare.go new file mode 100644 index 000000000000..c2ca4e321412 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/compare.go @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "reflect" +) + +type typeEqualsConfig struct { + metadata bool +} + +// TypeEqualOption is a functional option type used for configuring type +// equality checks. 
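+//
+// A usage sketch (illustrative):
+//
+//	same := arrow.TypeEqual(left, right)                          // types only
+//	strict := arrow.TypeEqual(left, right, arrow.CheckMetadata()) // types + metadata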
+type TypeEqualOption func(*typeEqualsConfig) + +// CheckMetadata is an option for TypeEqual that allows checking for metadata +// equality besides type equality. It only makes sense for STRUCT type. +func CheckMetadata() TypeEqualOption { + return func(cfg *typeEqualsConfig) { + cfg.metadata = true + } +} + +// TypeEqual checks if two DataType are the same, optionally checking metadata +// equality for STRUCT types. +func TypeEqual(left, right DataType, opts ...TypeEqualOption) bool { + var cfg typeEqualsConfig + for _, opt := range opts { + opt(&cfg) + } + + switch { + case left == nil || right == nil: + return false + case left.ID() != right.ID(): + return false + } + + // StructType is the only type that has metadata. + l, ok := left.(*StructType) + if !ok || cfg.metadata { + return reflect.DeepEqual(left, right) + } + + r := right.(*StructType) + switch { + case len(l.fields) != len(r.fields): + return false + case !reflect.DeepEqual(l.index, r.index): + return false + } + for i := range l.fields { + leftField, rightField := l.fields[i], r.fields[i] + switch { + case leftField.Name != rightField.Name: + return false + case leftField.Nullable != rightField.Nullable: + return false + case !TypeEqual(leftField.Type, rightField.Type, opts...): + return false + } + } + return true +} diff --git a/vendor/github.com/apache/arrow/go/arrow/datatype.go b/vendor/github.com/apache/arrow/go/arrow/datatype.go new file mode 100644 index 000000000000..5a9936ec95b9 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/datatype.go @@ -0,0 +1,143 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +// Type is a logical type. They can be expressed as +// either a primitive physical type (bytes or bits of some fixed size), a +// nested type consisting of other data types, or another data type (e.g. 
a +// timestamp encoded as an int64) +type Type int + +const ( + // NULL type having no physical storage + NULL Type = iota + + // BOOL is a 1 bit, LSB bit-packed ordering + BOOL + + // UINT8 is an Unsigned 8-bit little-endian integer + UINT8 + + // INT8 is a Signed 8-bit little-endian integer + INT8 + + // UINT16 is an Unsigned 16-bit little-endian integer + UINT16 + + // INT16 is a Signed 16-bit little-endian integer + INT16 + + // UINT32 is an Unsigned 32-bit little-endian integer + UINT32 + + // INT32 is a Signed 32-bit little-endian integer + INT32 + + // UINT64 is an Unsigned 64-bit little-endian integer + UINT64 + + // INT64 is a Signed 64-bit little-endian integer + INT64 + + // FLOAT16 is a 2-byte floating point value + FLOAT16 + + // FLOAT32 is a 4-byte floating point value + FLOAT32 + + // FLOAT64 is an 8-byte floating point value + FLOAT64 + + // STRING is a UTF8 variable-length string + STRING + + // BINARY is a Variable-length byte type (no guarantee of UTF8-ness) + BINARY + + // FIXED_SIZE_BINARY is a binary where each value occupies the same number of bytes + FIXED_SIZE_BINARY + + // DATE32 is int32 days since the UNIX epoch + DATE32 + + // DATE64 is int64 milliseconds since the UNIX epoch + DATE64 + + // TIMESTAMP is an exact timestamp encoded with int64 since UNIX epoch + // Default unit millisecond + TIMESTAMP + + // TIME32 is a signed 32-bit integer, representing either seconds or + // milliseconds since midnight + TIME32 + + // TIME64 is a signed 64-bit integer, representing either microseconds or + // nanoseconds since midnight + TIME64 + + // INTERVAL is YEAR_MONTH or DAY_TIME interval in SQL style + INTERVAL + + // DECIMAL is a precision- and scale-based decimal type. Storage type depends on the + // parameters. + DECIMAL + + // LIST is a list of some logical data type + LIST + + // STRUCT of logical types + STRUCT + + // UNION of logical types + UNION + + // DICTIONARY aka Category type + DICTIONARY + + // MAP is a repeated struct logical type + MAP + + // Custom data type, implemented by user + EXTENSION + + // Fixed size list of some logical type + FIXED_SIZE_LIST + + // Measure of elapsed time in either seconds, milliseconds, microseconds + // or nanoseconds. + DURATION +) + +// DataType is the representation of an Arrow type. +type DataType interface { + ID() Type + // Name is name of the data type. + Name() string +} + +// FixedWidthDataType is the representation of an Arrow type that +// requires a fixed number of bits in memory for each element. +type FixedWidthDataType interface { + DataType + // BitWidth returns the number of bits required to store a single element of this data type in memory. + BitWidth() int +} + +type BinaryDataType interface { + DataType + binary() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/datatype_binary.go b/vendor/github.com/apache/arrow/go/arrow/datatype_binary.go new file mode 100644 index 000000000000..4fc0162734e8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/datatype_binary.go @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+type BinaryType struct{}
+
+func (t *BinaryType) ID() Type       { return BINARY }
+func (t *BinaryType) Name() string   { return "binary" }
+func (t *BinaryType) String() string { return "binary" }
+func (t *BinaryType) binary()        {}
+
+type StringType struct{}
+
+func (t *StringType) ID() Type       { return STRING }
+func (t *StringType) Name() string   { return "utf8" }
+func (t *StringType) String() string { return "utf8" }
+func (t *StringType) binary()        {}
+
+var (
+	BinaryTypes = struct {
+		Binary BinaryDataType
+		String BinaryDataType
+	}{
+		Binary: &BinaryType{},
+		String: &StringType{},
+	}
+)
diff --git a/vendor/github.com/apache/arrow/go/arrow/datatype_fixedwidth.go b/vendor/github.com/apache/arrow/go/arrow/datatype_fixedwidth.go
new file mode 100644
index 000000000000..79bb878f36ae
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/datatype_fixedwidth.go
@@ -0,0 +1,213 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+	"fmt"
+	"strconv"
+)
+
+type BooleanType struct{}
+
+func (t *BooleanType) ID() Type       { return BOOL }
+func (t *BooleanType) Name() string   { return "bool" }
+func (t *BooleanType) String() string { return "bool" }
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (t *BooleanType) BitWidth() int { return 1 }
+
+type FixedSizeBinaryType struct {
+	ByteWidth int
+}
+
+func (*FixedSizeBinaryType) ID() Type        { return FIXED_SIZE_BINARY }
+func (*FixedSizeBinaryType) Name() string    { return "fixed_size_binary" }
+func (t *FixedSizeBinaryType) BitWidth() int { return 8 * t.ByteWidth }
+
+func (t *FixedSizeBinaryType) String() string {
+	return "fixed_size_binary[" + strconv.Itoa(t.ByteWidth) + "]"
+}
+
+type (
+	Timestamp int64
+	Time32    int32
+	Time64    int64
+	TimeUnit  int
+	Date32    int32
+	Date64    int64
+	Duration  int64
+)
+
+const (
+	Nanosecond TimeUnit = iota
+	Microsecond
+	Millisecond
+	Second
+)
+
+func (u TimeUnit) String() string { return [...]string{"ns", "us", "ms", "s"}[uint(u)&3] }
+
+// TimestampType is encoded as a 64-bit signed integer since the UNIX epoch (1970-01-01T00:00:00Z).
+// The zero value has nanosecond resolution and is time zone neutral. A time zone
+// neutral value can be considered UTC without having "UTC" as a time zone.
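+//
+// A minimal illustrative sketch of constructing and printing one:
+//
+//	ts := &TimestampType{Unit: Millisecond, TimeZone: "UTC"}
+//	fmt.Println(ts) // timestamp[ms, tz=UTC]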
+type TimestampType struct {
+	Unit     TimeUnit
+	TimeZone string
+}
+
+func (*TimestampType) ID() Type     { return TIMESTAMP }
+func (*TimestampType) Name() string { return "timestamp" }
+func (t *TimestampType) String() string {
+	switch len(t.TimeZone) {
+	case 0:
+		return "timestamp[" + t.Unit.String() + "]"
+	default:
+		return "timestamp[" + t.Unit.String() + ", tz=" + t.TimeZone + "]"
+	}
+}
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (*TimestampType) BitWidth() int { return 64 }
+
+// Time32Type is encoded as a 32-bit signed integer, representing either seconds or milliseconds since midnight.
+type Time32Type struct {
+	Unit TimeUnit
+}
+
+func (*Time32Type) ID() Type          { return TIME32 }
+func (*Time32Type) Name() string      { return "time32" }
+func (*Time32Type) BitWidth() int     { return 32 }
+func (t *Time32Type) String() string  { return "time32[" + t.Unit.String() + "]" }
+
+// Time64Type is encoded as a 64-bit signed integer, representing either microseconds or nanoseconds since midnight.
+type Time64Type struct {
+	Unit TimeUnit
+}
+
+func (*Time64Type) ID() Type          { return TIME64 }
+func (*Time64Type) Name() string      { return "time64" }
+func (*Time64Type) BitWidth() int     { return 64 }
+func (t *Time64Type) String() string  { return "time64[" + t.Unit.String() + "]" }
+
+// DurationType is encoded as a 64-bit signed integer, representing an amount
+// of elapsed time without any relation to a calendar artifact.
+type DurationType struct {
+	Unit TimeUnit
+}
+
+func (*DurationType) ID() Type          { return DURATION }
+func (*DurationType) Name() string      { return "duration" }
+func (*DurationType) BitWidth() int     { return 64 }
+func (t *DurationType) String() string  { return "duration[" + t.Unit.String() + "]" }
+
+// Float16Type represents a floating point value encoded with 16-bit precision.
+type Float16Type struct{}
+
+func (t *Float16Type) ID() Type       { return FLOAT16 }
+func (t *Float16Type) Name() string   { return "float16" }
+func (t *Float16Type) String() string { return "float16" }
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (t *Float16Type) BitWidth() int { return 16 }
+
+// Decimal128Type represents a fixed-size 128-bit decimal type.
+type Decimal128Type struct {
+	Precision int32
+	Scale     int32
+}
+
+func (*Decimal128Type) ID() Type      { return DECIMAL }
+func (*Decimal128Type) Name() string  { return "decimal" }
+func (*Decimal128Type) BitWidth() int { return 128 }
+func (t *Decimal128Type) String() string {
+	return fmt.Sprintf("%s(%d, %d)", t.Name(), t.Precision, t.Scale)
+}
+
+// MonthInterval represents a number of months.
+type MonthInterval int32
+
+// MonthIntervalType is encoded as a 32-bit signed integer,
+// representing a number of months.
+type MonthIntervalType struct{}
+
+func (*MonthIntervalType) ID() Type       { return INTERVAL }
+func (*MonthIntervalType) Name() string   { return "month_interval" }
+func (*MonthIntervalType) String() string { return "month_interval" }
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (t *MonthIntervalType) BitWidth() int { return 32 }
+
+// DayTimeInterval represents a number of days and milliseconds (fraction of day).
+type DayTimeInterval struct {
+	Days         int32 `json:"days"`
+	Milliseconds int32 `json:"milliseconds"`
+}
+
+// DayTimeIntervalType is encoded as a pair of 32-bit signed integers,
+// representing a number of days and milliseconds (fraction of day).
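+//
+// For example, a value of one and a half days can be expressed as
+// (illustrative sketch):
+//
+//	iv := DayTimeInterval{Days: 1, Milliseconds: 43200000} // 1 day + 12 hours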
+type DayTimeIntervalType struct{} + +func (*DayTimeIntervalType) ID() Type { return INTERVAL } +func (*DayTimeIntervalType) Name() string { return "day_time_interval" } +func (*DayTimeIntervalType) String() string { return "day_time_interval" } + +// BitWidth returns the number of bits required to store a single element of this data type in memory. +func (t *DayTimeIntervalType) BitWidth() int { return 64 } + +var ( + FixedWidthTypes = struct { + Boolean FixedWidthDataType + Date32 FixedWidthDataType + Date64 FixedWidthDataType + DayTimeInterval FixedWidthDataType + Duration_s FixedWidthDataType + Duration_ms FixedWidthDataType + Duration_us FixedWidthDataType + Duration_ns FixedWidthDataType + Float16 FixedWidthDataType + MonthInterval FixedWidthDataType + Time32s FixedWidthDataType + Time32ms FixedWidthDataType + Time64us FixedWidthDataType + Time64ns FixedWidthDataType + Timestamp_s FixedWidthDataType + Timestamp_ms FixedWidthDataType + Timestamp_us FixedWidthDataType + Timestamp_ns FixedWidthDataType + }{ + Boolean: &BooleanType{}, + Date32: &Date32Type{}, + Date64: &Date64Type{}, + DayTimeInterval: &DayTimeIntervalType{}, + Duration_s: &DurationType{Unit: Second}, + Duration_ms: &DurationType{Unit: Millisecond}, + Duration_us: &DurationType{Unit: Microsecond}, + Duration_ns: &DurationType{Unit: Nanosecond}, + Float16: &Float16Type{}, + MonthInterval: &MonthIntervalType{}, + Time32s: &Time32Type{Unit: Second}, + Time32ms: &Time32Type{Unit: Millisecond}, + Time64us: &Time64Type{Unit: Microsecond}, + Time64ns: &Time64Type{Unit: Nanosecond}, + Timestamp_s: &TimestampType{Unit: Second, TimeZone: "UTC"}, + Timestamp_ms: &TimestampType{Unit: Millisecond, TimeZone: "UTC"}, + Timestamp_us: &TimestampType{Unit: Microsecond, TimeZone: "UTC"}, + Timestamp_ns: &TimestampType{Unit: Nanosecond, TimeZone: "UTC"}, + } + + _ FixedWidthDataType = (*FixedSizeBinaryType)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/datatype_nested.go b/vendor/github.com/apache/arrow/go/arrow/datatype_nested.go new file mode 100644 index 000000000000..c6173fe87cb8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/datatype_nested.go @@ -0,0 +1,180 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "fmt" + "reflect" + "strings" +) + +// ListType describes a nested type in which each array slot contains +// a variable-size sequence of values, all having the same relative type. +type ListType struct { + elem DataType // DataType of the list's elements +} + +// ListOf returns the list type with element type t. +// For example, if t represents int32, ListOf(t) represents []int32. +// +// ListOf panics if t is nil or invalid. 
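+//
+// A minimal illustrative sketch:
+//
+//	lt := ListOf(PrimitiveTypes.Int32)
+//	fmt.Println(lt) // list<item: int32>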
+func ListOf(t DataType) *ListType {
+	if t == nil {
+		panic("arrow: nil DataType")
+	}
+	return &ListType{elem: t}
+}
+
+func (*ListType) ID() Type           { return LIST }
+func (*ListType) Name() string       { return "list" }
+func (t *ListType) String() string   { return fmt.Sprintf("list<item: %v>", t.elem) }
+
+// Elem returns the ListType's element type.
+func (t *ListType) Elem() DataType { return t.elem }
+
+// FixedSizeListType describes a nested type in which each array slot contains
+// a fixed-size sequence of values, all having the same relative type.
+type FixedSizeListType struct {
+	n    int32    // number of elements in the list
+	elem DataType // DataType of the list's elements
+}
+
+// FixedSizeListOf returns the list type with element type t.
+// For example, if t represents int32, FixedSizeListOf(10, t) represents [10]int32.
+//
+// FixedSizeListOf panics if t is nil or invalid.
+// FixedSizeListOf panics if n is <= 0.
+func FixedSizeListOf(n int32, t DataType) *FixedSizeListType {
+	if t == nil {
+		panic("arrow: nil DataType")
+	}
+	if n <= 0 {
+		panic("arrow: invalid size")
+	}
+	return &FixedSizeListType{elem: t, n: n}
+}
+
+func (*FixedSizeListType) ID() Type     { return FIXED_SIZE_LIST }
+func (*FixedSizeListType) Name() string { return "fixed_size_list" }
+func (t *FixedSizeListType) String() string {
+	return fmt.Sprintf("fixed_size_list<item: %v>[%d]", t.elem, t.n)
+}
+
+// Elem returns the FixedSizeListType's element type.
+func (t *FixedSizeListType) Elem() DataType { return t.elem }
+
+// Len returns the FixedSizeListType's size.
+func (t *FixedSizeListType) Len() int32 { return t.n }
+
+// StructType describes a nested type parameterized by an ordered sequence
+// of relative types, called its fields.
+type StructType struct {
+	fields []Field
+	index  map[string]int
+	meta   Metadata
+}
+
+// StructOf returns the struct type with fields fs.
+//
+// StructOf panics if there are duplicate fields.
+// StructOf panics if there is a field with an invalid DataType.
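+//
+// A minimal illustrative sketch:
+//
+//	st := StructOf(
+//		Field{Name: "id", Type: PrimitiveTypes.Int64},
+//		Field{Name: "name", Type: BinaryTypes.String, Nullable: true},
+//	)
+//	fmt.Println(st) // struct<id: int64, name: utf8>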
+func StructOf(fs ...Field) *StructType { + n := len(fs) + if n == 0 { + return &StructType{} + } + + t := &StructType{ + fields: make([]Field, n), + index: make(map[string]int, n), + } + for i, f := range fs { + if f.Type == nil { + panic("arrow: field with nil DataType") + } + t.fields[i] = Field{ + Name: f.Name, + Type: f.Type, + Nullable: f.Nullable, + Metadata: f.Metadata.clone(), + } + if _, dup := t.index[f.Name]; dup { + panic(fmt.Errorf("arrow: duplicate field with name %q", f.Name)) + } + t.index[f.Name] = i + } + + return t +} + +func (*StructType) ID() Type { return STRUCT } +func (*StructType) Name() string { return "struct" } + +func (t *StructType) String() string { + o := new(strings.Builder) + o.WriteString("struct<") + for i, f := range t.fields { + if i > 0 { + o.WriteString(", ") + } + o.WriteString(fmt.Sprintf("%s: %v", f.Name, f.Type)) + } + o.WriteString(">") + return o.String() +} + +func (t *StructType) Fields() []Field { return t.fields } +func (t *StructType) Field(i int) Field { return t.fields[i] } + +func (t *StructType) FieldByName(name string) (Field, bool) { + i, ok := t.index[name] + if !ok { + return Field{}, false + } + return t.fields[i], true +} + +type Field struct { + Name string // Field name + Type DataType // The field's data type + Nullable bool // Fields can be nullable + Metadata Metadata // The field's metadata, if any +} + +func (f Field) HasMetadata() bool { return f.Metadata.Len() != 0 } + +func (f Field) Equal(o Field) bool { + return reflect.DeepEqual(f, o) +} + +func (f Field) String() string { + o := new(strings.Builder) + nullable := "" + if f.Nullable { + nullable = ", nullable" + } + fmt.Fprintf(o, "%s: type=%v%v", f.Name, f.Type, nullable) + if f.HasMetadata() { + fmt.Fprintf(o, "\n%*.smetadata: %v", len(f.Name)+2, "", f.Metadata) + } + return o.String() +} + +var ( + _ DataType = (*ListType)(nil) + _ DataType = (*StructType)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/datatype_null.go b/vendor/github.com/apache/arrow/go/arrow/datatype_null.go new file mode 100644 index 000000000000..5882dfe39b1f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/datatype_null.go @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +// NullType describes a degenerate array, with zero physical storage. 
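+//
+// The package-level Null variable declared below can be used wherever a
+// DataType is expected, e.g. (illustrative sketch):
+//
+//	var dt DataType = Null
+//	fmt.Println(dt.Name()) // null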
+type NullType struct{} + +func (*NullType) ID() Type { return NULL } +func (*NullType) Name() string { return "null" } +func (*NullType) String() string { return "null" } + +var ( + Null *NullType + _ DataType = Null +) diff --git a/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go b/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go new file mode 100644 index 000000000000..d5b34d7d4fa4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go @@ -0,0 +1,134 @@ +// Code generated by datatype_numeric.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +type Int8Type struct{} + +func (t *Int8Type) ID() Type { return INT8 } +func (t *Int8Type) Name() string { return "int8" } +func (t *Int8Type) String() string { return "int8" } +func (t *Int8Type) BitWidth() int { return 8 } + +type Int16Type struct{} + +func (t *Int16Type) ID() Type { return INT16 } +func (t *Int16Type) Name() string { return "int16" } +func (t *Int16Type) String() string { return "int16" } +func (t *Int16Type) BitWidth() int { return 16 } + +type Int32Type struct{} + +func (t *Int32Type) ID() Type { return INT32 } +func (t *Int32Type) Name() string { return "int32" } +func (t *Int32Type) String() string { return "int32" } +func (t *Int32Type) BitWidth() int { return 32 } + +type Int64Type struct{} + +func (t *Int64Type) ID() Type { return INT64 } +func (t *Int64Type) Name() string { return "int64" } +func (t *Int64Type) String() string { return "int64" } +func (t *Int64Type) BitWidth() int { return 64 } + +type Uint8Type struct{} + +func (t *Uint8Type) ID() Type { return UINT8 } +func (t *Uint8Type) Name() string { return "uint8" } +func (t *Uint8Type) String() string { return "uint8" } +func (t *Uint8Type) BitWidth() int { return 8 } + +type Uint16Type struct{} + +func (t *Uint16Type) ID() Type { return UINT16 } +func (t *Uint16Type) Name() string { return "uint16" } +func (t *Uint16Type) String() string { return "uint16" } +func (t *Uint16Type) BitWidth() int { return 16 } + +type Uint32Type struct{} + +func (t *Uint32Type) ID() Type { return UINT32 } +func (t *Uint32Type) Name() string { return "uint32" } +func (t *Uint32Type) String() string { return "uint32" } +func (t *Uint32Type) BitWidth() int { return 32 } + +type Uint64Type struct{} + +func (t *Uint64Type) ID() Type { return UINT64 } +func (t *Uint64Type) Name() string { return "uint64" } +func (t *Uint64Type) String() string { return "uint64" } +func (t *Uint64Type) BitWidth() int { return 64 } + +type Float32Type struct{} + +func (t *Float32Type) ID() Type { return FLOAT32 } +func (t *Float32Type) Name() string { return "float32" } +func (t *Float32Type) String() string { return "float32" } +func (t *Float32Type) 
BitWidth() int { return 32 } + +type Float64Type struct{} + +func (t *Float64Type) ID() Type { return FLOAT64 } +func (t *Float64Type) Name() string { return "float64" } +func (t *Float64Type) String() string { return "float64" } +func (t *Float64Type) BitWidth() int { return 64 } + +type Date32Type struct{} + +func (t *Date32Type) ID() Type { return DATE32 } +func (t *Date32Type) Name() string { return "date32" } +func (t *Date32Type) String() string { return "date32" } +func (t *Date32Type) BitWidth() int { return 32 } + +type Date64Type struct{} + +func (t *Date64Type) ID() Type { return DATE64 } +func (t *Date64Type) Name() string { return "date64" } +func (t *Date64Type) String() string { return "date64" } +func (t *Date64Type) BitWidth() int { return 64 } + +var ( + PrimitiveTypes = struct { + Int8 DataType + Int16 DataType + Int32 DataType + Int64 DataType + Uint8 DataType + Uint16 DataType + Uint32 DataType + Uint64 DataType + Float32 DataType + Float64 DataType + Date32 DataType + Date64 DataType + }{ + + Int8: &Int8Type{}, + Int16: &Int16Type{}, + Int32: &Int32Type{}, + Int64: &Int64Type{}, + Uint8: &Uint8Type{}, + Uint16: &Uint16Type{}, + Uint32: &Uint32Type{}, + Uint64: &Uint64Type{}, + Float32: &Float32Type{}, + Float64: &Float64Type{}, + Date32: &Date32Type{}, + Date64: &Date64Type{}, + } +) diff --git a/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go.tmpl new file mode 100644 index 000000000000..193cf09e45d3 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go.tmpl @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package arrow + +{{range .In}} +type {{.Name}}Type struct {} + +func (t *{{.Name}}Type) ID() Type { return {{.Name|upper}} } +func (t *{{.Name}}Type) Name() string { return "{{.Name|lower}}" } +func (t *{{.Name}}Type) String() string { return "{{.Name|lower}}" } +func (t *{{.Name}}Type) BitWidth() int { return {{.Size}} } + + +{{end}} + +var ( + PrimitiveTypes = struct { +{{range .In}} + {{.Name}} DataType +{{- end}} + }{ +{{range .In}} + {{.Name}}: &{{.Name}}Type{}, +{{- end}} + } +) diff --git a/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go.tmpldata b/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go.tmpldata new file mode 100644 index 000000000000..12e69fe60c0b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/datatype_numeric.gen.go.tmpldata @@ -0,0 +1,66 @@ +[ + { + "Name": "Int8", + "Type": "int8", + "Size": 8 + }, + { + "Name": "Int16", + "Type": "int16", + "Size": 16 + }, + { + "Name": "Int32", + "Type": "int32", + "Size": 32 + }, + { + "Name": "Int64", + "Type": "int64", + "Size": 64 + }, + { + "Name": "Uint8", + "Type": "uint8", + "Size": 8 + }, + { + "Name": "Uint16", + "Type": "uint16", + "Size": 16 + }, + { + "Name": "Uint32", + "Type": "uint32", + "Size": 32 + }, + { + "Name": "Uint64", + "Type": "uint64", + "Size": 64 + }, + { + "Name": "Float32", + "Type": "float32", + "Size": 32 + }, + { + "Name": "Float64", + "Type": "float64", + "Size": 64 + }, + { + "Name": "Date32", + "Type": "date32", + "QualifiedType": "arrow.Date32", + "InternalType": "int32", + "Size": 32 + }, + { + "Name": "Date64", + "Type": "date64", + "QualifiedType": "arrow.Date64", + "InternalType": "int64", + "Size": 64 + } +] diff --git a/vendor/github.com/apache/arrow/go/arrow/decimal128/decimal128.go b/vendor/github.com/apache/arrow/go/arrow/decimal128/decimal128.go new file mode 100644 index 000000000000..2f1b181772d0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/decimal128/decimal128.go @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package decimal128 // import "github.com/apache/arrow/go/arrow/decimal128" + +var ( + MaxDecimal128 = New(542101086242752217, 687399551400673280-1) +) + +// Num represents a signed 128-bit integer in two's complement. +// Calculations wrap around and overflow is ignored. +// +// For a discussion of the algorithms, look at Knuth's volume 2, +// Semi-numerical Algorithms section 4.3.1. +// +// Adapted from the Apache ORC C++ implementation +type Num struct { + lo uint64 // low bits + hi int64 // high bits +} + +// New returns a new signed 128-bit integer value. +func New(hi int64, lo uint64) Num { + return Num{lo: lo, hi: hi} +} + +// FromU64 returns a new signed 128-bit integer value from the provided uint64 one. 
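+//
+// A minimal illustrative sketch of both constructors:
+//
+//	n := FromU64(42) // n.HighBits() == 0, n.LowBits() == 42
+//	m := FromI64(-1) // m.HighBits() == -1, m.LowBits() == math.MaxUint64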
+func FromU64(v uint64) Num { + return New(0, v) +} + +// FromI64 returns a new signed 128-bit integer value from the provided int64 one. +func FromI64(v int64) Num { + switch { + case v > 0: + return New(0, uint64(v)) + case v < 0: + return New(-1, uint64(v)) + default: + return Num{} + } +} + +// LowBits returns the low bits of the two's complement representation of the number. +func (n Num) LowBits() uint64 { return n.lo } + +// HighBits returns the high bits of the two's complement representation of the number. +func (n Num) HighBits() int64 { return n.hi } + +// Sign returns: +// +// -1 if x < 0 +// 0 if x == 0 +// +1 if x > 0 +func (n Num) Sign() int { + if n == (Num{}) { + return 0 + } + return int(1 | (n.hi >> 63)) +} diff --git a/vendor/github.com/apache/arrow/go/arrow/doc.go b/vendor/github.com/apache/arrow/go/arrow/doc.go new file mode 100644 index 000000000000..10ddda97cbbf --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/doc.go @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package arrow provides an implementation of Apache Arrow. + +Apache Arrow is a cross-language development platform for in-memory data. It specifies a standardized +language-independent columnar memory format for flat and hierarchical data, organized for efficient analytic +operations on modern hardware. It also provides computational libraries and zero-copy streaming +messaging and inter-process communication. + +Basics + +The fundamental data structure in Arrow is an Array, which holds a sequence of values of the same type. An array +consists of memory holding the data and an additional validity bitmap that indicates if the corresponding entry in the +array is valid (not null). If the array has no null entries, it is possible to omit this bitmap. + +*/ +package arrow + +//go:generate go run _tools/tmpl/main.go -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl +//go:generate go run _tools/tmpl/main.go -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl tensor/numeric.gen.go.tmpl tensor/numeric.gen_test.go.tmpl +//go:generate go run ./gen-flatbuffers.go + +// stringer +//go:generate stringer -type=Type diff --git a/vendor/github.com/apache/arrow/go/arrow/float16/float16.go b/vendor/github.com/apache/arrow/go/arrow/float16/float16.go new file mode 100644 index 000000000000..e15ee2c20d76 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/float16/float16.go @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package float16 // import "github.com/apache/arrow/go/arrow/float16"
+
+import (
+	"math"
+	"strconv"
+)
+
+// Num represents a half-precision floating point value (float16)
+// stored in 16 bits.
+//
+// See https://en.wikipedia.org/wiki/Half-precision_floating-point_format for more information.
+type Num struct {
+	bits uint16
+}
+
+// New creates a new half-precision floating point value from the provided
+// float32 value.
+func New(f float32) Num {
+	b := math.Float32bits(f)
+	sn := uint16((b >> 31) & 0x1)
+	exp := (b >> 23) & 0xff
+	res := int16(exp) - 127 + 15
+	fc := uint16(b>>13) & 0x3ff
+	switch {
+	case exp == 0:
+		res = 0
+	case exp == 0xff:
+		res = 0x1f
+	case res > 0x1e:
+		res = 0x1f
+		fc = 0
+	case res < 0x01:
+		res = 0
+		fc = 0
+	}
+	return Num{bits: (sn << 15) | uint16(res<<10) | fc}
+}
+
+func (f Num) Float32() float32 {
+	sn := uint32((f.bits >> 15) & 0x1)
+	exp := (f.bits >> 10) & 0x1f
+	res := uint32(exp) + 127 - 15
+	fc := uint32(f.bits & 0x3ff)
+	switch {
+	case exp == 0:
+		res = 0
+	case exp == 0x1f:
+		res = 0xff
+	}
+	return math.Float32frombits((sn << 31) | (res << 23) | (fc << 13))
+}
+
+func (f Num) Uint16() uint16 { return f.bits }
+func (f Num) String() string { return strconv.FormatFloat(float64(f.Float32()), 'g', -1, 32) }
diff --git a/vendor/github.com/apache/arrow/go/arrow/go.mod b/vendor/github.com/apache/arrow/go/arrow/go.mod
new file mode 100644
index 000000000000..a83f7cc2fdbe
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/go.mod
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+module github.com/apache/arrow/go/arrow
+
+go 1.12
+
+require (
+	github.com/davecgh/go-spew v1.1.0 // indirect
+	github.com/google/flatbuffers v1.11.0
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/testify v1.2.0
+	golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
+)
diff --git a/vendor/github.com/apache/arrow/go/arrow/go.sum b/vendor/github.com/apache/arrow/go/arrow/go.sum
new file mode 100644
index 000000000000..25e38a1bb4a9
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/go.sum
@@ -0,0 +1,10 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A=
+github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.2.0 h1:LThGCOvhuJic9Gyd1VBCkhyUXmO8vKaBFvBsJ2k03rg=
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/cpu/README.md b/vendor/github.com/apache/arrow/go/arrow/internal/cpu/README.md
new file mode 100644
index 000000000000..2619b38c11ec
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/internal/cpu/README.md
@@ -0,0 +1,42 @@
+<!---
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
+
+# Package cpu
+
+Copied from Go src/internal/cpu
+
+## Extras
+
+### Intel
+
+The `INTEL_DISABLE_EXT` environment variable can control which CPU extensions are available for
+the running process. It should be a comma-separated list of upper-case strings as follows:
+
+| Flag | Description |
+| -------- | ----------- |
+| `ALL` | Disable all CPU extensions and fall back to Go implementation |
+| `AVX2` | Disable AVX2 optimizations |
+| `AVX` | Disable AVX optimizations |
+| `SSE` | Disable all SSE optimizations |
+| `SSE4` | Disable SSE42, SSE41 optimizations |
+| `SSSE3` | Disable supplemental SSE3 optimizations |
+| `SSE3` | Disable SSE3 optimizations |
+| `SSE2` | Disable SSE2 optimizations |
+
+Any unrecognized flags are ignored, so it is possible to leave the environment variable set to a bogus value such as `NONE` when experimenting.
\ No newline at end of file
diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/cpu/cpu.go b/vendor/github.com/apache/arrow/go/arrow/internal/cpu/cpu.go
new file mode 100644
index 000000000000..22fc561002d8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/internal/cpu/cpu.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection
+// used by the Go standard library.
+package cpu
+
+var X86 x86
+
+// The booleans in x86 contain the correspondingly named cpuid feature bit.
+// HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers
+// in addition to the cpuid feature bit being set.
+// The struct is padded to avoid false sharing.
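+//
+// Callers typically branch on these flags to choose an optimized code path,
+// e.g. (illustrative sketch):
+//
+//	if cpu.X86.HasAVX2 {
+//		// use the AVX2 kernel
+//	} else {
+//		// fall back to the portable Go implementation
+//	}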
+type x86 struct { + _ [CacheLineSize]byte + HasAES bool + HasADX bool + HasAVX bool + HasAVX2 bool + HasBMI1 bool + HasBMI2 bool + HasERMS bool + HasFMA bool + HasOSXSAVE bool + HasPCLMULQDQ bool + HasPOPCNT bool + HasSSE2 bool + HasSSE3 bool + HasSSSE3 bool + HasSSE41 bool + HasSSE42 bool + _ [CacheLineSize]byte +} + +var PPC64 ppc64 + +// For ppc64x, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (darn, scv), so there are capability bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07), so we +// maintain some of the old capability checks for optional categories for +// safety. +// The struct is padded to avoid false sharing. +type ppc64 struct { + _ [CacheLineSize]byte + HasVMX bool // Vector unit (Altivec) + HasDFP bool // Decimal Floating Point unit + HasVSX bool // Vector-scalar unit + HasHTM bool // Hardware Transactional Memory + HasISEL bool // Integer select + HasVCRYPTO bool // Vector cryptography + HasHTMNOSC bool // HTM: kernel-aborted transaction in syscalls + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ [CacheLineSize]byte +} + +var ARM64 arm64 + +// The booleans in arm64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +type arm64 struct { + _ [CacheLineSize]byte + HasFP bool + HasASIMD bool + HasEVTSTRM bool + HasAES bool + HasPMULL bool + HasSHA1 bool + HasSHA2 bool + HasCRC32 bool + HasATOMICS bool + _ [CacheLineSize]byte +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/cpu/cpu_x86.go b/vendor/github.com/apache/arrow/go/arrow/internal/cpu/cpu_x86.go new file mode 100644 index 000000000000..777269f1a252 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/cpu/cpu_x86.go @@ -0,0 +1,107 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 + +package cpu + +import ( + "os" + "strings" +) + +const CacheLineSize = 64 + +// cpuid is implemented in cpu_x86.s. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s. +func xgetbv() (eax, edx uint32) + +func init() { + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. 
+		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+	}
+
+	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	_, ebx7, _, _ := cpuid(7, 0)
+	X86.HasBMI1 = isSet(3, ebx7)
+	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+	X86.HasBMI2 = isSet(8, ebx7)
+	X86.HasERMS = isSet(9, ebx7)
+	X86.HasADX = isSet(19, ebx7)
+
+	// NOTE(sgc): added ability to disable extension via environment
+	checkEnvironment()
+}
+
+func checkEnvironment() {
+	if ext, ok := os.LookupEnv("INTEL_DISABLE_EXT"); ok {
+		exts := strings.Split(ext, ",")
+
+		for _, x := range exts {
+			switch x {
+			case "ALL":
+				X86.HasAVX2 = false
+				X86.HasAVX = false
+				X86.HasSSE42 = false
+				X86.HasSSE41 = false
+				X86.HasSSSE3 = false
+				X86.HasSSE3 = false
+				X86.HasSSE2 = false
+
+			case "AVX2":
+				X86.HasAVX2 = false
+			case "AVX":
+				X86.HasAVX = false
+			case "SSE":
+				X86.HasSSE42 = false
+				X86.HasSSE41 = false
+				X86.HasSSSE3 = false
+				X86.HasSSE3 = false
+				X86.HasSSE2 = false
+			case "SSE4":
+				X86.HasSSE42 = false
+				X86.HasSSE41 = false
+			case "SSSE3":
+				X86.HasSSSE3 = false
+			case "SSE3":
+				X86.HasSSE3 = false
+			case "SSE2":
+				X86.HasSSE2 = false
+			}
+		}
+	}
+}
+
+func isSet(bitpos uint, value uint32) bool {
+	return value&(1<<bitpos) != 0
+}
diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FieldNode.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FieldNode.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FieldNode.go
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+	flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// Data structures for describing a table row batch (a collection of
+/// equal-length Arrow arrays)
+/// Metadata about a field at some level of a nested type tree (but not
+/// its children).
+///
+/// For example, a List<Int16> with values [[1, 2, 3], null, [4], [5, 6], null]
+/// would have {length: 5, null_count: 2} for its List node, and {length: 6,
+/// null_count: 0} for its Int16 node, as separate FieldNode structs
+type FieldNode struct {
+	_tab flatbuffers.Struct
+}
+
+func (rcv *FieldNode) Init(buf []byte, i flatbuffers.UOffsetT) {
+	rcv._tab.Bytes = buf
+	rcv._tab.Pos = i
+}
+
+func (rcv *FieldNode) Table() flatbuffers.Table {
+	return rcv._tab.Table
+}
+
+/// The number of value slots in the Arrow array at this level of a nested
+/// tree
+func (rcv *FieldNode) Length() int64 {
+	return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0))
+}
+/// The number of value slots in the Arrow array at this level of a nested
+/// tree
+func (rcv *FieldNode) MutateLength(n int64) bool {
+	return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n)
+}
+
+/// The number of observed nulls. Fields with null_count == 0 may choose not
+/// to write their physical validity bitmap out as a materialized buffer,
+/// instead setting the length of the bitmap buffer to 0.
+func (rcv *FieldNode) NullCount() int64 {
+	return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8))
+}
+/// The number of observed nulls. Fields with null_count == 0 may choose not
+/// to write their physical validity bitmap out as a materialized buffer,
+/// instead setting the length of the bitmap buffer to 0.
+func (rcv *FieldNode) MutateNullCount(n int64) bool {
+	return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n)
+}
+
+func CreateFieldNode(builder *flatbuffers.Builder, length int64, nullCount int64) flatbuffers.UOffsetT {
+	builder.Prep(8, 16)
+	builder.PrependInt64(nullCount)
+	builder.PrependInt64(length)
+	return builder.Offset()
+}
diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FixedSizeBinary.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FixedSizeBinary.go
new file mode 100644
index 000000000000..4e660d5077f7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FixedSizeBinary.go
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type FixedSizeBinary struct { + _tab flatbuffers.Table +} + +func GetRootAsFixedSizeBinary(buf []byte, offset flatbuffers.UOffsetT) *FixedSizeBinary { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &FixedSizeBinary{} + x.Init(buf, n+offset) + return x +} + +func (rcv *FixedSizeBinary) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FixedSizeBinary) Table() flatbuffers.Table { + return rcv._tab +} + +/// Number of bytes per value +func (rcv *FixedSizeBinary) ByteWidth() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +/// Number of bytes per value +func (rcv *FixedSizeBinary) MutateByteWidth(n int32) bool { + return rcv._tab.MutateInt32Slot(4, n) +} + +func FixedSizeBinaryStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func FixedSizeBinaryAddByteWidth(builder *flatbuffers.Builder, byteWidth int32) { + builder.PrependInt32Slot(0, byteWidth, 0) +} +func FixedSizeBinaryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FixedSizeList.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FixedSizeList.go new file mode 100644 index 000000000000..dabf5cc8581d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FixedSizeList.go @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type FixedSizeList struct { + _tab flatbuffers.Table +} + +func GetRootAsFixedSizeList(buf []byte, offset flatbuffers.UOffsetT) *FixedSizeList { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &FixedSizeList{} + x.Init(buf, n+offset) + return x +} + +func (rcv *FixedSizeList) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FixedSizeList) Table() flatbuffers.Table { + return rcv._tab +} + +/// Number of list items per value +func (rcv *FixedSizeList) ListSize() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +/// Number of list items per value +func (rcv *FixedSizeList) MutateListSize(n int32) bool { + return rcv._tab.MutateInt32Slot(4, n) +} + +func FixedSizeListStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func FixedSizeListAddListSize(builder *flatbuffers.Builder, listSize int32) { + builder.PrependInt32Slot(0, listSize, 0) +} +func FixedSizeListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FloatingPoint.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FloatingPoint.go new file mode 100644 index 000000000000..975a52dbe0a5 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/FloatingPoint.go @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type FloatingPoint struct { + _tab flatbuffers.Table +} + +func GetRootAsFloatingPoint(buf []byte, offset flatbuffers.UOffsetT) *FloatingPoint { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &FloatingPoint{} + x.Init(buf, n+offset) + return x +} + +func (rcv *FloatingPoint) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FloatingPoint) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *FloatingPoint) Precision() Precision { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt16(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *FloatingPoint) MutatePrecision(n Precision) bool { + return rcv._tab.MutateInt16Slot(4, n) +} + +func FloatingPointStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func FloatingPointAddPrecision(builder *flatbuffers.Builder, precision int16) { + builder.PrependInt16Slot(0, precision, 0) +} +func FloatingPointEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Footer.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Footer.go new file mode 100644 index 000000000000..6802e77c0328 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Footer.go @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// Arrow File metadata +/// +type Footer struct { + _tab flatbuffers.Table +} + +func GetRootAsFooter(buf []byte, offset flatbuffers.UOffsetT) *Footer { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Footer{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Footer) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Footer) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Footer) Version() MetadataVersion { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt16(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Footer) MutateVersion(n MetadataVersion) bool { + return rcv._tab.MutateInt16Slot(4, n) +} + +func (rcv *Footer) Schema(obj *Schema) *Schema { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Schema) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *Footer) Dictionaries(obj *Block, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 24 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Footer) DictionariesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *Footer) RecordBatches(obj *Block, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 24 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Footer) RecordBatchesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func FooterStart(builder *flatbuffers.Builder) { + builder.StartObject(4) +} +func FooterAddVersion(builder *flatbuffers.Builder, version int16) { + builder.PrependInt16Slot(0, version, 0) +} +func FooterAddSchema(builder *flatbuffers.Builder, schema flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(schema), 0) +} +func FooterAddDictionaries(builder *flatbuffers.Builder, dictionaries flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(dictionaries), 0) +} +func FooterStartDictionariesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(24, numElems, 8) +} +func FooterAddRecordBatches(builder *flatbuffers.Builder, recordBatches flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(recordBatches), 0) +} +func FooterStartRecordBatchesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(24, numElems, 8) +} +func FooterEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Int.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Int.go new file mode 100644 index 000000000000..9f4b1911705c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Int.go @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Int struct { + _tab flatbuffers.Table +} + +func GetRootAsInt(buf []byte, offset flatbuffers.UOffsetT) *Int { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Int{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Int) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Int) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Int) BitWidth() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Int) MutateBitWidth(n int32) bool { + return rcv._tab.MutateInt32Slot(4, n) +} + +func (rcv *Int) IsSigned() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +func (rcv *Int) MutateIsSigned(n bool) bool { + return rcv._tab.MutateBoolSlot(6, n) +} + +func IntStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func IntAddBitWidth(builder *flatbuffers.Builder, bitWidth int32) { + builder.PrependInt32Slot(0, bitWidth, 0) +} +func IntAddIsSigned(builder *flatbuffers.Builder, isSigned bool) { + builder.PrependBoolSlot(1, isSigned, false) +} +func IntEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Interval.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Interval.go new file mode 100644 index 000000000000..21970ef10801 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Interval.go @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Interval struct { + _tab flatbuffers.Table +} + +func GetRootAsInterval(buf []byte, offset flatbuffers.UOffsetT) *Interval { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Interval{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Interval) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Interval) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Interval) Unit() IntervalUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt16(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Interval) MutateUnit(n IntervalUnit) bool { + return rcv._tab.MutateInt16Slot(4, n) +} + +func IntervalStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func IntervalAddUnit(builder *flatbuffers.Builder, unit int16) { + builder.PrependInt16Slot(0, unit, 0) +} +func IntervalEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/IntervalUnit.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/IntervalUnit.go new file mode 100644 index 000000000000..62baea99117f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/IntervalUnit.go @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +type IntervalUnit = int16 +const ( + IntervalUnitYEAR_MONTH IntervalUnit = 0 + IntervalUnitDAY_TIME IntervalUnit = 1 +) + +var EnumNamesIntervalUnit = map[IntervalUnit]string{ + IntervalUnitYEAR_MONTH:"YEAR_MONTH", + IntervalUnitDAY_TIME:"DAY_TIME", +} + diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/KeyValue.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/KeyValue.go new file mode 100644 index 000000000000..c1b85318ecd5 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/KeyValue.go @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// user defined key value pairs to add custom metadata to arrow +/// key namespacing is the responsibility of the user +type KeyValue struct { + _tab flatbuffers.Table +} + +func GetRootAsKeyValue(buf []byte, offset flatbuffers.UOffsetT) *KeyValue { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &KeyValue{} + x.Init(buf, n+offset) + return x +} + +func (rcv *KeyValue) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *KeyValue) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *KeyValue) Key() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *KeyValue) Value() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func KeyValueStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func KeyValueAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) +} +func KeyValueAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0) +} +func KeyValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/List.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/List.go new file mode 100644 index 000000000000..ba84319d3f69 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/List.go @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
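KeyValue is the generic string-pair table used for custom metadata on schemas, fields, and messages. One FlatBuffers detail worth calling out: byte-vector data such as the key and value must be created before KeyValueStart, since vector data cannot be written inside an open table. A sketch with hypothetical key and value strings:

func appMetadata(b *flatbuffers.Builder) flatbuffers.UOffsetT {
	// Strings first: vector data cannot be nested inside an open table.
	k := b.CreateString("app.origin") // hypothetical metadata key
	v := b.CreateString("example")    // hypothetical metadata value
	flatbuf.KeyValueStart(b)
	flatbuf.KeyValueAddKey(b, k)
	flatbuf.KeyValueAddValue(b, v)
	return flatbuf.KeyValueEnd(b)
}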
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type List struct { + _tab flatbuffers.Table +} + +func GetRootAsList(buf []byte, offset flatbuffers.UOffsetT) *List { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &List{} + x.Init(buf, n+offset) + return x +} + +func (rcv *List) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *List) Table() flatbuffers.Table { + return rcv._tab +} + +func ListStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func ListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Map.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Map.go new file mode 100644 index 000000000000..d540fc757033 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Map.go @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// A Map is a logical nested type that is represented as +/// +/// List> +/// +/// In this layout, the keys and values are each respectively contiguous. We do +/// not constrain the key and value types, so the application is responsible +/// for ensuring that the keys are hashable and unique. Whether the keys are sorted +/// may be set in the metadata for this field. +/// +/// In a field with Map type, the field has a child Struct field, which then +/// has two children: key type and the second the value type. The names of the +/// child fields may be respectively "entries", "key", and "value", but this is +/// not enforced. +/// +/// Map +/// - child[0] entries: Struct +/// - child[0] key: K +/// - child[1] value: V +/// +/// Neither the "entries" field nor the "key" field may be nullable. +/// +/// The metadata is structured so that Arrow systems without special handling +/// for Map can make Map an alias for List. The "layout" attribute for the Map +/// field must have the same contents as a List. 
+type Map struct { + _tab flatbuffers.Table +} + +func GetRootAsMap(buf []byte, offset flatbuffers.UOffsetT) *Map { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Map{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Map) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Map) Table() flatbuffers.Table { + return rcv._tab +} + +/// Set to true if the keys within each value are sorted +func (rcv *Map) KeysSorted() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +/// Set to true if the keys within each value are sorted +func (rcv *Map) MutateKeysSorted(n bool) bool { + return rcv._tab.MutateBoolSlot(4, n) +} + +func MapStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func MapAddKeysSorted(builder *flatbuffers.Builder, keysSorted bool) { + builder.PrependBoolSlot(0, keysSorted, false) +} +func MapEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Message.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Message.go new file mode 100644 index 000000000000..76fcc389d1de --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Message.go @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
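As the Map documentation above notes, the entries/key/value shape is expressed through the Field children of a Map-typed field, not in the Map table itself; the table carries only the keysSorted flag. A sketch of emitting the type node alone:

func mapTypeNode(b *flatbuffers.Builder, sorted bool) flatbuffers.UOffsetT {
	// The List<Struct<key, value>> layout lives in the field's children
	// elsewhere in the schema; only the sortedness flag is stored here.
	flatbuf.MapStart(b)
	flatbuf.MapAddKeysSorted(b, sorted)
	return flatbuf.MapEnd(b)
}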
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Message struct { + _tab flatbuffers.Table +} + +func GetRootAsMessage(buf []byte, offset flatbuffers.UOffsetT) *Message { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Message{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Message) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Message) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Message) Version() MetadataVersion { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt16(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Message) MutateVersion(n MetadataVersion) bool { + return rcv._tab.MutateInt16Slot(4, n) +} + +func (rcv *Message) HeaderType() byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetByte(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Message) MutateHeaderType(n byte) bool { + return rcv._tab.MutateByteSlot(6, n) +} + +func (rcv *Message) Header(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +func (rcv *Message) BodyLength() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Message) MutateBodyLength(n int64) bool { + return rcv._tab.MutateInt64Slot(10, n) +} + +func (rcv *Message) CustomMetadata(obj *KeyValue, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Message) CustomMetadataLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func MessageStart(builder *flatbuffers.Builder) { + builder.StartObject(5) +} +func MessageAddVersion(builder *flatbuffers.Builder, version int16) { + builder.PrependInt16Slot(0, version, 0) +} +func MessageAddHeaderType(builder *flatbuffers.Builder, headerType byte) { + builder.PrependByteSlot(1, headerType, 0) +} +func MessageAddHeader(builder *flatbuffers.Builder, header flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(header), 0) +} +func MessageAddBodyLength(builder *flatbuffers.Builder, bodyLength int64) { + builder.PrependInt64Slot(3, bodyLength, 0) +} +func MessageAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(customMetadata), 0) +} +func MessageStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func MessageEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/MessageHeader.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/MessageHeader.go new file mode 100644 index 000000000000..18730bed59fb --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/MessageHeader.go @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +/// ---------------------------------------------------------------------- +/// The root Message type +/// This union enables us to easily send different message types without +/// redundant storage, and in the future we can easily add new message types. +/// +/// Arrow implementations do not need to implement all of the message types, +/// which may include experimental metadata types. For maximum compatibility, +/// it is best to send data using RecordBatch +type MessageHeader = byte +const ( + MessageHeaderNONE MessageHeader = 0 + MessageHeaderSchema MessageHeader = 1 + MessageHeaderDictionaryBatch MessageHeader = 2 + MessageHeaderRecordBatch MessageHeader = 3 + MessageHeaderTensor MessageHeader = 4 + MessageHeaderSparseTensor MessageHeader = 5 +) + +var EnumNamesMessageHeader = map[MessageHeader]string{ + MessageHeaderNONE:"NONE", + MessageHeaderSchema:"Schema", + MessageHeaderDictionaryBatch:"DictionaryBatch", + MessageHeaderRecordBatch:"RecordBatch", + MessageHeaderTensor:"Tensor", + MessageHeaderSparseTensor:"SparseTensor", +} + diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/MetadataVersion.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/MetadataVersion.go new file mode 100644 index 000000000000..0094430c7500 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/MetadataVersion.go @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
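Message.Header is a FlatBuffers union discriminated by the MessageHeader byte above, so readers switch on HeaderType and then re-initialize the union table as the concrete type. A sketch of the dispatch for the RecordBatch case, assuming the Message has already been parsed:

func headerAsRecordBatch(msg *flatbuf.Message) (*flatbuf.RecordBatch, bool) {
	var tbl flatbuffers.Table
	// HeaderType carries the union discriminant; Header fills in the table.
	if msg.HeaderType() != flatbuf.MessageHeaderRecordBatch || !msg.Header(&tbl) {
		return nil, false
	}
	rb := new(flatbuf.RecordBatch)
	rb.Init(tbl.Bytes, tbl.Pos)
	return rb, true
}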
+ +package flatbuf + +type MetadataVersion = int16 +const ( + /// 0.1.0 + MetadataVersionV1 MetadataVersion = 0 + /// 0.2.0 + MetadataVersionV2 MetadataVersion = 1 + /// 0.3.0 -> 0.7.1 + MetadataVersionV3 MetadataVersion = 2 + /// >= 0.8.0 + MetadataVersionV4 MetadataVersion = 3 +) + +var EnumNamesMetadataVersion = map[MetadataVersion]string{ + MetadataVersionV1:"V1", + MetadataVersionV2:"V2", + MetadataVersionV3:"V3", + MetadataVersionV4:"V4", +} + diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Null.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Null.go new file mode 100644 index 000000000000..3c3eb4bda361 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Null.go @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// These are stored in the flatbuffer in the Type union below +type Null struct { + _tab flatbuffers.Table +} + +func GetRootAsNull(buf []byte, offset flatbuffers.UOffsetT) *Null { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Null{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Null) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Null) Table() flatbuffers.Table { + return rcv._tab +} + +func NullStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func NullEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Precision.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Precision.go new file mode 100644 index 000000000000..417d1e788e8a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Precision.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
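MetadataVersion gives readers a cheap compatibility gate, and EnumNames* maps like the one above exist for every enum in this package, which keeps error messages readable. A shape-only sketch (whether a reader accepts versions older than V4 is an implementation choice, not something this file dictates):

func checkVersion(msg *flatbuf.Message) error {
	if v := msg.Version(); v < flatbuf.MetadataVersionV4 {
		return fmt.Errorf("unsupported metadata version %s",
			flatbuf.EnumNamesMetadataVersion[v])
	}
	return nil
}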
+ +package flatbuf + +type Precision = int16 +const ( + PrecisionHALF Precision = 0 + PrecisionSINGLE Precision = 1 + PrecisionDOUBLE Precision = 2 +) + +var EnumNamesPrecision = map[Precision]string{ + PrecisionHALF:"HALF", + PrecisionSINGLE:"SINGLE", + PrecisionDOUBLE:"DOUBLE", +} + diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/RecordBatch.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/RecordBatch.go new file mode 100644 index 000000000000..b997bdf16c7d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/RecordBatch.go @@ -0,0 +1,136 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// A data header describing the shared memory layout of a "record" or "row" +/// batch. Some systems call this a "row batch" internally and others a "record +/// batch". +type RecordBatch struct { + _tab flatbuffers.Table +} + +func GetRootAsRecordBatch(buf []byte, offset flatbuffers.UOffsetT) *RecordBatch { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RecordBatch{} + x.Init(buf, n+offset) + return x +} + +func (rcv *RecordBatch) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RecordBatch) Table() flatbuffers.Table { + return rcv._tab +} + +/// number of records / rows. The arrays in the batch should all have this +/// length +func (rcv *RecordBatch) Length() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +/// number of records / rows. The arrays in the batch should all have this +/// length +func (rcv *RecordBatch) MutateLength(n int64) bool { + return rcv._tab.MutateInt64Slot(4, n) +} + +/// Nodes correspond to the pre-ordered flattened logical schema +func (rcv *RecordBatch) Nodes(obj *FieldNode, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 16 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *RecordBatch) NodesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Nodes correspond to the pre-ordered flattened logical schema +/// Buffers correspond to the pre-ordered flattened buffer tree +/// +/// The number of buffers appended to this list depends on the schema. For +/// example, most primitive arrays will have 2 buffers, 1 for the validity +/// bitmap and 1 for the values. 
For struct arrays, there will only be a +/// single buffer for the validity (nulls) bitmap +func (rcv *RecordBatch) Buffers(obj *Buffer, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 16 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *RecordBatch) BuffersLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Buffers correspond to the pre-ordered flattened buffer tree +/// +/// The number of buffers appended to this list depends on the schema. For +/// example, most primitive arrays will have 2 buffers, 1 for the validity +/// bitmap and 1 for the values. For struct arrays, there will only be a +/// single buffer for the validity (nulls) bitmap +func RecordBatchStart(builder *flatbuffers.Builder) { + builder.StartObject(3) +} +func RecordBatchAddLength(builder *flatbuffers.Builder, length int64) { + builder.PrependInt64Slot(0, length, 0) +} +func RecordBatchAddNodes(builder *flatbuffers.Builder, nodes flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(nodes), 0) +} +func RecordBatchStartNodesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(16, numElems, 8) +} +func RecordBatchAddBuffers(builder *flatbuffers.Builder, buffers flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(buffers), 0) +} +func RecordBatchStartBuffersVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(16, numElems, 8) +} +func RecordBatchEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Schema.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Schema.go new file mode 100644 index 000000000000..d3e27373b2e4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Schema.go @@ -0,0 +1,125 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
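Nodes and Buffers are parallel vectors of 16-byte inline structs laid out in the pre-order field traversal the comments describe, so consumers index into them rather than chase offsets. A sketch of walking both (FieldNode and Buffer are generated earlier in this vendored package; their Length/NullCount and Offset/Length accessors are assumed from the corresponding Arrow schema definitions):

func walkBatch(rb *flatbuf.RecordBatch) {
	var node flatbuf.FieldNode
	for i := 0; i < rb.NodesLength(); i++ {
		if rb.Nodes(&node, i) { // one 16-byte struct per logical field
			fmt.Printf("node %d: length=%d nulls=%d\n", i, node.Length(), node.NullCount())
		}
	}
	var buf flatbuf.Buffer
	for i := 0; i < rb.BuffersLength(); i++ {
		if rb.Buffers(&buf, i) { // one 16-byte struct per physical buffer
			fmt.Printf("buffer %d: offset=%d length=%d\n", i, buf.Offset(), buf.Length())
		}
	}
}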
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// A Schema describes the columns in a row batch +type Schema struct { + _tab flatbuffers.Table +} + +func GetRootAsSchema(buf []byte, offset flatbuffers.UOffsetT) *Schema { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Schema{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Schema) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Schema) Table() flatbuffers.Table { + return rcv._tab +} + +/// endianness of the buffer +/// it is Little Endian by default +/// if endianness doesn't match the underlying system then the vectors need to be converted +func (rcv *Schema) Endianness() Endianness { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt16(o + rcv._tab.Pos) + } + return 0 +} + +/// endianness of the buffer +/// it is Little Endian by default +/// if endianness doesn't match the underlying system then the vectors need to be converted +func (rcv *Schema) MutateEndianness(n Endianness) bool { + return rcv._tab.MutateInt16Slot(4, n) +} + +func (rcv *Schema) Fields(obj *Field, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Schema) FieldsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *Schema) CustomMetadata(obj *KeyValue, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Schema) CustomMetadataLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func SchemaStart(builder *flatbuffers.Builder) { + builder.StartObject(3) +} +func SchemaAddEndianness(builder *flatbuffers.Builder, endianness int16) { + builder.PrependInt16Slot(0, endianness, 0) +} +func SchemaAddFields(builder *flatbuffers.Builder, fields flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(fields), 0) +} +func SchemaStartFieldsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SchemaAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(customMetadata), 0) +} +func SchemaStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SchemaEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseMatrixIndexCSR.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseMatrixIndexCSR.go new file mode 100644 index 000000000000..de8217650b28 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseMatrixIndexCSR.go @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Compressed Sparse Row format, that is matrix-specific. +type SparseMatrixIndexCSR struct { + _tab flatbuffers.Table +} + +func GetRootAsSparseMatrixIndexCSR(buf []byte, offset flatbuffers.UOffsetT) *SparseMatrixIndexCSR { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SparseMatrixIndexCSR{} + x.Init(buf, n+offset) + return x +} + +func (rcv *SparseMatrixIndexCSR) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SparseMatrixIndexCSR) Table() flatbuffers.Table { + return rcv._tab +} + +/// The type of values in indptrBuffer +func (rcv *SparseMatrixIndexCSR) IndptrType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indptrBuffer +/// indptrBuffer stores the location and size of indptr array that +/// represents the range of the rows. +/// The i-th row spans from indptr[i] to indptr[i+1] in the data. +/// The length of this array is 1 + (the number of rows), and the type +/// of index value is long. +/// +/// For example, let X be the following 6x4 matrix: +/// +/// X := [[0, 1, 2, 0], +/// [0, 0, 3, 0], +/// [0, 4, 0, 5], +/// [0, 0, 0, 0], +/// [6, 0, 7, 8], +/// [0, 9, 0, 0]]. +/// +/// The array of non-zero values in X is: +/// +/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +/// +/// And the indptr of X is: +/// +/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +func (rcv *SparseMatrixIndexCSR) IndptrBuffer(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// indptrBuffer stores the location and size of indptr array that +/// represents the range of the rows. +/// The i-th row spans from indptr[i] to indptr[i+1] in the data. +/// The length of this array is 1 + (the number of rows), and the type +/// of index value is long. +/// +/// For example, let X be the following 6x4 matrix: +/// +/// X := [[0, 1, 2, 0], +/// [0, 0, 3, 0], +/// [0, 4, 0, 5], +/// [0, 0, 0, 0], +/// [6, 0, 7, 8], +/// [0, 9, 0, 0]]. +/// +/// The array of non-zero values in X is: +/// +/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +/// +/// And the indptr of X is: +/// +/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. 
+/// The type of values in indicesBuffer +func (rcv *SparseMatrixIndexCSR) IndicesType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indicesBuffer +/// indicesBuffer stores the location and size of the array that +/// contains the column indices of the corresponding non-zero values. +/// The type of index value is long. +/// +/// For example, the indices of the above X is: +/// +/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +/// +/// Note that the indices are sorted in lexicographical order for each row. +func (rcv *SparseMatrixIndexCSR) IndicesBuffer(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// indicesBuffer stores the location and size of the array that +/// contains the column indices of the corresponding non-zero values. +/// The type of index value is long. +/// +/// For example, the indices of the above X is: +/// +/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +/// +/// Note that the indices are sorted in lexicographical order for each row. +func SparseMatrixIndexCSRStart(builder *flatbuffers.Builder) { + builder.StartObject(4) +} +func SparseMatrixIndexCSRAddIndptrType(builder *flatbuffers.Builder, indptrType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(indptrType), 0) +} +func SparseMatrixIndexCSRAddIndptrBuffer(builder *flatbuffers.Builder, indptrBuffer flatbuffers.UOffsetT) { + builder.PrependStructSlot(1, flatbuffers.UOffsetT(indptrBuffer), 0) +} +func SparseMatrixIndexCSRAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(indicesType), 0) +} +func SparseMatrixIndexCSRAddIndicesBuffer(builder *flatbuffers.Builder, indicesBuffer flatbuffers.UOffsetT) { + builder.PrependStructSlot(3, flatbuffers.UOffsetT(indicesBuffer), 0) +} +func SparseMatrixIndexCSREnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensor.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensor.go new file mode 100644 index 000000000000..389832b95a14 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensor.go @@ -0,0 +1,175 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
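Note the asymmetry in the CSR builders above: the type fields are tables (PrependUOffsetTSlot), so they must be finished before the index table starts, while the Buffer fields are structs (PrependStructSlot) and are written inline immediately before their slot is recorded. A sketch using the 6x4 example from the comments, where indptr holds 7 int64s and indices holds 9 (CreateBuffer is the struct constructor generated in Buffer.go earlier in the package; the byte offsets here are hypothetical):

func csrIndex(b *flatbuffers.Builder) flatbuffers.UOffsetT {
	// Tables referenced by the index are built first.
	flatbuf.IntStart(b)
	flatbuf.IntAddBitWidth(b, 64)
	flatbuf.IntAddIsSigned(b, true)
	long := flatbuf.IntEnd(b) // both indptr and indices are long per the comments

	flatbuf.SparseMatrixIndexCSRStart(b)
	flatbuf.SparseMatrixIndexCSRAddIndptrType(b, long)
	// Structs are written inline, right before the Add call records the slot.
	flatbuf.SparseMatrixIndexCSRAddIndptrBuffer(b, flatbuf.CreateBuffer(b, 0, 7*8))
	flatbuf.SparseMatrixIndexCSRAddIndicesType(b, long)
	flatbuf.SparseMatrixIndexCSRAddIndicesBuffer(b, flatbuf.CreateBuffer(b, 56, 9*8))
	return flatbuf.SparseMatrixIndexCSREnd(b)
}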
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type SparseTensor struct { + _tab flatbuffers.Table +} + +func GetRootAsSparseTensor(buf []byte, offset flatbuffers.UOffsetT) *SparseTensor { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SparseTensor{} + x.Init(buf, n+offset) + return x +} + +func (rcv *SparseTensor) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SparseTensor) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *SparseTensor) TypeType() byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetByte(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *SparseTensor) MutateTypeType(n byte) bool { + return rcv._tab.MutateByteSlot(4, n) +} + +/// The type of data contained in a value cell. +/// Currently only fixed-width value types are supported, +/// no strings or nested types. +func (rcv *SparseTensor) Type(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +/// The type of data contained in a value cell. +/// Currently only fixed-width value types are supported, +/// no strings or nested types. +/// The dimensions of the tensor, optionally named. +func (rcv *SparseTensor) Shape(obj *TensorDim, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *SparseTensor) ShapeLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// The dimensions of the tensor, optionally named. +/// The number of non-zero values in a sparse tensor. +func (rcv *SparseTensor) NonZeroLength() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +/// The number of non-zero values in a sparse tensor. 
+func (rcv *SparseTensor) MutateNonZeroLength(n int64) bool { + return rcv._tab.MutateInt64Slot(10, n) +} + +func (rcv *SparseTensor) SparseIndexType() byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.GetByte(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *SparseTensor) MutateSparseIndexType(n byte) bool { + return rcv._tab.MutateByteSlot(12, n) +} + +/// Sparse tensor index +func (rcv *SparseTensor) SparseIndex(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +/// Sparse tensor index +/// The location and size of the tensor's data +func (rcv *SparseTensor) Data(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The location and size of the tensor's data +func SparseTensorStart(builder *flatbuffers.Builder) { + builder.StartObject(7) +} +func SparseTensorAddTypeType(builder *flatbuffers.Builder, typeType byte) { + builder.PrependByteSlot(0, typeType, 0) +} +func SparseTensorAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(type_), 0) +} +func SparseTensorAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(shape), 0) +} +func SparseTensorStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SparseTensorAddNonZeroLength(builder *flatbuffers.Builder, nonZeroLength int64) { + builder.PrependInt64Slot(3, nonZeroLength, 0) +} +func SparseTensorAddSparseIndexType(builder *flatbuffers.Builder, sparseIndexType byte) { + builder.PrependByteSlot(4, sparseIndexType, 0) +} +func SparseTensorAddSparseIndex(builder *flatbuffers.Builder, sparseIndex flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(sparseIndex), 0) +} +func SparseTensorAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) { + builder.PrependStructSlot(6, flatbuffers.UOffsetT(data), 0) +} +func SparseTensorEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensorIndex.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensorIndex.go new file mode 100644 index 000000000000..4b82de7efece --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensorIndex.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +type SparseTensorIndex = byte +const ( + SparseTensorIndexNONE SparseTensorIndex = 0 + SparseTensorIndexSparseTensorIndexCOO SparseTensorIndex = 1 + SparseTensorIndexSparseMatrixIndexCSR SparseTensorIndex = 2 +) + +var EnumNamesSparseTensorIndex = map[SparseTensorIndex]string{ + SparseTensorIndexNONE:"NONE", + SparseTensorIndexSparseTensorIndexCOO:"SparseTensorIndexCOO", + SparseTensorIndexSparseMatrixIndexCSR:"SparseMatrixIndexCSR", +} + diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensorIndexCOO.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensorIndexCOO.go new file mode 100644 index 000000000000..77c8dee902f5 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/SparseTensorIndexCOO.go @@ -0,0 +1,150 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// EXPERIMENTAL: Data structures for sparse tensors +/// Coodinate (COO) format of sparse tensor index. +/// +/// COO's index list are represented as a NxM matrix, +/// where N is the number of non-zero values, +/// and M is the number of dimensions of a sparse tensor. +/// +/// indicesBuffer stores the location and size of the data of this indices +/// matrix. The value type and the stride of the indices matrix is +/// specified in indicesType and indicesStrides fields. +/// +/// For example, let X be a 2x3x4x5 tensor, and it has the following +/// 6 non-zero values: +/// +/// X[0, 1, 2, 0] := 1 +/// X[1, 1, 2, 3] := 2 +/// X[0, 2, 1, 0] := 3 +/// X[0, 1, 3, 0] := 4 +/// X[0, 1, 2, 1] := 5 +/// X[1, 2, 0, 4] := 6 +/// +/// In COO format, the index matrix of X is the following 4x6 matrix: +/// +/// [[0, 0, 0, 0, 1, 1], +/// [1, 1, 1, 2, 1, 2], +/// [2, 2, 3, 1, 2, 0], +/// [0, 1, 0, 0, 3, 4]] +/// +/// Note that the indices are sorted in lexicographical order. 
+type SparseTensorIndexCOO struct { + _tab flatbuffers.Table +} + +func GetRootAsSparseTensorIndexCOO(buf []byte, offset flatbuffers.UOffsetT) *SparseTensorIndexCOO { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SparseTensorIndexCOO{} + x.Init(buf, n+offset) + return x +} + +func (rcv *SparseTensorIndexCOO) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SparseTensorIndexCOO) Table() flatbuffers.Table { + return rcv._tab +} + +/// The type of values in indicesBuffer +func (rcv *SparseTensorIndexCOO) IndicesType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indicesBuffer +/// Non-negative byte offsets to advance one value cell along each dimension +func (rcv *SparseTensorIndexCOO) IndicesStrides(j int) int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8)) + } + return 0 +} + +func (rcv *SparseTensorIndexCOO) IndicesStridesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Non-negative byte offsets to advance one value cell along each dimension +func (rcv *SparseTensorIndexCOO) MutateIndicesStrides(j int, n int64) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), n) + } + return false +} + +/// The location and size of the indices matrix's data +func (rcv *SparseTensorIndexCOO) IndicesBuffer(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The location and size of the indices matrix's data +func SparseTensorIndexCOOStart(builder *flatbuffers.Builder) { + builder.StartObject(3) +} +func SparseTensorIndexCOOAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(indicesType), 0) +} +func SparseTensorIndexCOOAddIndicesStrides(builder *flatbuffers.Builder, indicesStrides flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indicesStrides), 0) +} +func SparseTensorIndexCOOStartIndicesStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(8, numElems, 8) +} +func SparseTensorIndexCOOAddIndicesBuffer(builder *flatbuffers.Builder, indicesBuffer flatbuffers.UOffsetT) { + builder.PrependStructSlot(2, flatbuffers.UOffsetT(indicesBuffer), 0) +} +func SparseTensorIndexCOOEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Struct_.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Struct_.go new file mode 100644 index 000000000000..427e7060382a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Struct_.go @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// A Struct_ in the flatbuffer metadata is the same as an Arrow Struct +/// (according to the physical memory layout). We used Struct_ here as +/// Struct is a reserved word in Flatbuffers +type Struct_ struct { + _tab flatbuffers.Table +} + +func GetRootAsStruct_(buf []byte, offset flatbuffers.UOffsetT) *Struct_ { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Struct_{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Struct_) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Struct_) Table() flatbuffers.Table { + return rcv._tab +} + +func Struct_Start(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func Struct_End(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Tensor.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Tensor.go new file mode 100644 index 000000000000..f8341e02c0bd --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Tensor.go @@ -0,0 +1,161 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Tensor struct { + _tab flatbuffers.Table +} + +func GetRootAsTensor(buf []byte, offset flatbuffers.UOffsetT) *Tensor { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Tensor{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Tensor) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Tensor) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Tensor) TypeType() byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetByte(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Tensor) MutateTypeType(n byte) bool { + return rcv._tab.MutateByteSlot(4, n) +} + +/// The type of data contained in a value cell. 
Currently only fixed-width +/// value types are supported, no strings or nested types +func (rcv *Tensor) Type(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +/// The type of data contained in a value cell. Currently only fixed-width +/// value types are supported, no strings or nested types +/// The dimensions of the tensor, optionally named +func (rcv *Tensor) Shape(obj *TensorDim, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Tensor) ShapeLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// The dimensions of the tensor, optionally named +/// Non-negative byte offsets to advance one value cell along each dimension +func (rcv *Tensor) Strides(j int) int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8)) + } + return 0 +} + +func (rcv *Tensor) StridesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Non-negative byte offsets to advance one value cell along each dimension +func (rcv *Tensor) MutateStrides(j int, n int64) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), n) + } + return false +} + +/// The location and size of the tensor's data +func (rcv *Tensor) Data(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The location and size of the tensor's data +func TensorStart(builder *flatbuffers.Builder) { + builder.StartObject(5) +} +func TensorAddTypeType(builder *flatbuffers.Builder, typeType byte) { + builder.PrependByteSlot(0, typeType, 0) +} +func TensorAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(type_), 0) +} +func TensorAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(shape), 0) +} +func TensorStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func TensorAddStrides(builder *flatbuffers.Builder, strides flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(strides), 0) +} +func TensorStartStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(8, numElems, 8) +} +func TensorAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) { + builder.PrependStructSlot(4, flatbuffers.UOffsetT(data), 0) +} +func TensorEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/TensorDim.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/TensorDim.go new file mode 100644 index 000000000000..14b82120887e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/TensorDim.go @@ -0,0 +1,83 @@ +// Licensed to the Apache 
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// Data structures for dense tensors +/// Shape data for a single axis in a tensor +type TensorDim struct { + _tab flatbuffers.Table +} + +func GetRootAsTensorDim(buf []byte, offset flatbuffers.UOffsetT) *TensorDim { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &TensorDim{} + x.Init(buf, n+offset) + return x +} + +func (rcv *TensorDim) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *TensorDim) Table() flatbuffers.Table { + return rcv._tab +} + +/// Length of dimension +func (rcv *TensorDim) Size() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +/// Length of dimension +func (rcv *TensorDim) MutateSize(n int64) bool { + return rcv._tab.MutateInt64Slot(4, n) +} + +/// Name of the dimension, optional +func (rcv *TensorDim) Name() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +/// Name of the dimension, optional +func TensorDimStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func TensorDimAddSize(builder *flatbuffers.Builder, size int64) { + builder.PrependInt64Slot(0, size, 0) +} +func TensorDimAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(name), 0) +} +func TensorDimEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Time.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Time.go new file mode 100644 index 000000000000..f3fa99f07b85 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Time.go @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Time type. The physical storage type depends on the unit +/// - SECOND and MILLISECOND: 32 bits +/// - MICROSECOND and NANOSECOND: 64 bits +type Time struct { + _tab flatbuffers.Table +} + +func GetRootAsTime(buf []byte, offset flatbuffers.UOffsetT) *Time { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Time{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Time) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Time) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Time) Unit() TimeUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt16(o + rcv._tab.Pos) + } + return 1 +} + +func (rcv *Time) MutateUnit(n TimeUnit) bool { + return rcv._tab.MutateInt16Slot(4, n) +} + +func (rcv *Time) BitWidth() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 32 +} + +func (rcv *Time) MutateBitWidth(n int32) bool { + return rcv._tab.MutateInt32Slot(6, n) +} + +func TimeStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func TimeAddUnit(builder *flatbuffers.Builder, unit int16) { + builder.PrependInt16Slot(0, unit, 1) +} +func TimeAddBitWidth(builder *flatbuffers.Builder, bitWidth int32) { + builder.PrependInt32Slot(1, bitWidth, 32) +} +func TimeEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/TimeUnit.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/TimeUnit.go new file mode 100644 index 000000000000..cb0c7427d6c6 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/TimeUnit.go @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
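The Time table above ties Unit to BitWidth: SECOND and MILLISECOND values are stored in 32 bits, MICROSECOND and NANOSECOND values in 64 bits. A minimal sketch of that rule as a helper, written as if it sat alongside the generated code in package flatbuf and using the TimeUnit constants defined just below; the bitWidthForTimeUnit name is illustrative, not part of the generated API:

    package flatbuf

    // bitWidthForTimeUnit returns the physical storage width implied by a
    // TimeUnit, per the doc comment on the Time table: SECOND and MILLISECOND
    // values are 32-bit, MICROSECOND and NANOSECOND values are 64-bit.
    func bitWidthForTimeUnit(unit TimeUnit) int32 {
        switch unit {
        case TimeUnitSECOND, TimeUnitMILLISECOND:
            return 32
        default: // TimeUnitMICROSECOND, TimeUnitNANOSECOND
            return 64
        }
    }

This is also why the generated builders above default Unit to 1 (MILLISECOND) and BitWidth to 32: the two defaults are consistent with each other.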
+ +package flatbuf + +type TimeUnit = int16 +const ( + TimeUnitSECOND TimeUnit = 0 + TimeUnitMILLISECOND TimeUnit = 1 + TimeUnitMICROSECOND TimeUnit = 2 + TimeUnitNANOSECOND TimeUnit = 3 +) + +var EnumNamesTimeUnit = map[TimeUnit]string{ + TimeUnitSECOND:"SECOND", + TimeUnitMILLISECOND:"MILLISECOND", + TimeUnitMICROSECOND:"MICROSECOND", + TimeUnitNANOSECOND:"NANOSECOND", +} + diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Timestamp.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Timestamp.go new file mode 100644 index 000000000000..dd98c8226ec9 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Timestamp.go @@ -0,0 +1,122 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Time elapsed from the Unix epoch, 00:00:00.000 on 1 January 1970, excluding +/// leap seconds, as a 64-bit integer. Note that UNIX time does not include +/// leap seconds. +/// +/// The Timestamp metadata supports both "time zone naive" and "time zone +/// aware" timestamps. Read about the timezone attribute for more detail +type Timestamp struct { + _tab flatbuffers.Table +} + +func GetRootAsTimestamp(buf []byte, offset flatbuffers.UOffsetT) *Timestamp { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Timestamp{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Timestamp) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Timestamp) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Timestamp) Unit() TimeUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt16(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Timestamp) MutateUnit(n TimeUnit) bool { + return rcv._tab.MutateInt16Slot(4, n) +} + +/// The time zone is a string indicating the name of a time zone, one of: +/// +/// * As used in the Olson time zone database (the "tz database" or +/// "tzdata"), such as "America/New_York" +/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 +/// +/// Whether a timezone string is present indicates different semantics about +/// the data: +/// +/// * If the time zone is null or equal to an empty string, the data is "time +/// zone naive" and shall be displayed *as is* to the user, not localized +/// to the locale of the user. 
This data can be thought of as UTC but +/// without having "UTC" as the time zone, it is not considered to be +/// localized to any time zone +/// +/// * If the time zone is set to a valid value, values can be displayed as +/// "localized" to that time zone, even though the underlying 64-bit +/// integers are identical to the same data stored in UTC. Converting +/// between time zones is a metadata-only operation and does not change the +/// underlying values +func (rcv *Timestamp) Timezone() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +/// The time zone is a string indicating the name of a time zone, one of: +/// +/// * As used in the Olson time zone database (the "tz database" or +/// "tzdata"), such as "America/New_York" +/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 +/// +/// Whether a timezone string is present indicates different semantics about +/// the data: +/// +/// * If the time zone is null or equal to an empty string, the data is "time +/// zone naive" and shall be displayed *as is* to the user, not localized +/// to the locale of the user. This data can be thought of as UTC but +/// without having "UTC" as the time zone, it is not considered to be +/// localized to any time zone +/// +/// * If the time zone is set to a valid value, values can be displayed as +/// "localized" to that time zone, even though the underlying 64-bit +/// integers are identical to the same data stored in UTC. Converting +/// between time zones is a metadata-only operation and does not change the +/// underlying values +func TimestampStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func TimestampAddUnit(builder *flatbuffers.Builder, unit int16) { + builder.PrependInt16Slot(0, unit, 0) +} +func TimestampAddTimezone(builder *flatbuffers.Builder, timezone flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(timezone), 0) +} +func TimestampEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Type.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Type.go new file mode 100644 index 000000000000..e3fbe2bb110f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Type.go @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +/// ---------------------------------------------------------------------- +/// Top-level Type value, enabling extensible type-specific metadata. 
We can +/// add new logical types to Type without breaking backwards compatibility +type Type = byte +const ( + TypeNONE Type = 0 + TypeNull Type = 1 + TypeInt Type = 2 + TypeFloatingPoint Type = 3 + TypeBinary Type = 4 + TypeUtf8 Type = 5 + TypeBool Type = 6 + TypeDecimal Type = 7 + TypeDate Type = 8 + TypeTime Type = 9 + TypeTimestamp Type = 10 + TypeInterval Type = 11 + TypeList Type = 12 + TypeStruct_ Type = 13 + TypeUnion Type = 14 + TypeFixedSizeBinary Type = 15 + TypeFixedSizeList Type = 16 + TypeMap Type = 17 + TypeDuration Type = 18 + TypeLargeBinary Type = 19 + TypeLargeUtf8 Type = 20 + TypeLargeList Type = 21 +) + +var EnumNamesType = map[Type]string{ + TypeNONE:"NONE", + TypeNull:"Null", + TypeInt:"Int", + TypeFloatingPoint:"FloatingPoint", + TypeBinary:"Binary", + TypeUtf8:"Utf8", + TypeBool:"Bool", + TypeDecimal:"Decimal", + TypeDate:"Date", + TypeTime:"Time", + TypeTimestamp:"Timestamp", + TypeInterval:"Interval", + TypeList:"List", + TypeStruct_:"Struct_", + TypeUnion:"Union", + TypeFixedSizeBinary:"FixedSizeBinary", + TypeFixedSizeList:"FixedSizeList", + TypeMap:"Map", + TypeDuration:"Duration", + TypeLargeBinary:"LargeBinary", + TypeLargeUtf8:"LargeUtf8", + TypeLargeList:"LargeList", +} + diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Union.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Union.go new file mode 100644 index 000000000000..1eae176e630c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Union.go @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
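The Type enum above is the discriminant for the type union used by these tables, for example by Tensor.Type in Tensor.go earlier in this diff. A sketch of the generated union-access pattern, assuming the TypeType discriminant getter on Tensor and the Int table that flatc generates elsewhere in this vendor tree; describeTensorType is an illustrative name, not part of the generated API:

    package flatbuf

    import (
        flatbuffers "github.com/google/flatbuffers/go"
    )

    // describeTensorType decodes the Type union on a Tensor: TypeType() yields
    // the Type discriminant, Type() fills a raw flatbuffers.Table, and that
    // table is re-initialized as the concrete generated type.
    func describeTensorType(t *Tensor) string {
        var tbl flatbuffers.Table
        if !t.Type(&tbl) {
            return EnumNamesType[TypeNONE]
        }
        if t.TypeType() == TypeInt {
            var i Int
            i.Init(tbl.Bytes, tbl.Pos) // i's accessors are now usable
        }
        return EnumNamesType[t.TypeType()]
    }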
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// A union is a complex type with children in Field +/// By default ids in the type vector refer to the offsets in the children +/// optionally typeIds provides an indirection between the child offset and the type id +/// for each child typeIds[offset] is the id used in the type vector +type Union struct { + _tab flatbuffers.Table +} + +func GetRootAsUnion(buf []byte, offset flatbuffers.UOffsetT) *Union { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Union{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Union) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Union) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Union) Mode() UnionMode { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt16(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Union) MutateMode(n UnionMode) bool { + return rcv._tab.MutateInt16Slot(4, n) +} + +func (rcv *Union) TypeIds(j int) int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) + } + return 0 +} + +func (rcv *Union) TypeIdsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *Union) MutateTypeIds(j int, n int32) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt32(a+flatbuffers.UOffsetT(j*4), n) + } + return false +} + +func UnionStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func UnionAddMode(builder *flatbuffers.Builder, mode int16) { + builder.PrependInt16Slot(0, mode, 0) +} +func UnionAddTypeIds(builder *flatbuffers.Builder, typeIds flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(typeIds), 0) +} +func UnionStartTypeIdsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func UnionEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/UnionMode.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/UnionMode.go new file mode 100644 index 000000000000..4357682ad4d6 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/UnionMode.go @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
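The Union table above documents an optional indirection: by default a value's type id in the type vector is the child offset itself, and when a typeIds vector is present, typeIds[offset] supplies the id instead. A sketch of that resolution rule (unionChildTypeID is an illustrative name; the Sparse and Dense modes are enumerated just below in UnionMode.go):

    package flatbuf

    // unionChildTypeID resolves the type id for child offset j of a Union.
    // Without a typeIds vector the id is the offset itself; otherwise it is
    // typeIds[offset], as documented on the Union table above.
    func unionChildTypeID(u *Union, j int) int32 {
        if u.TypeIdsLength() == 0 {
            return int32(j)
        }
        return u.TypeIds(j)
    }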
+ +package flatbuf + +type UnionMode = int16 +const ( + UnionModeSparse UnionMode = 0 + UnionModeDense UnionMode = 1 +) + +var EnumNamesUnionMode = map[UnionMode]string{ + UnionModeSparse:"Sparse", + UnionModeDense:"Dense", +} + diff --git a/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Utf8.go b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Utf8.go new file mode 100644 index 000000000000..4ff365a37504 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/internal/flatbuf/Utf8.go @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Unicode with UTF-8 encoding +type Utf8 struct { + _tab flatbuffers.Table +} + +func GetRootAsUtf8(buf []byte, offset flatbuffers.UOffsetT) *Utf8 { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Utf8{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Utf8) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Utf8) Table() flatbuffers.Table { + return rcv._tab +} + +func Utf8Start(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func Utf8End(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/ipc/dict.go b/vendor/github.com/apache/arrow/go/arrow/ipc/dict.go new file mode 100644 index 000000000000..30c5b353bfce --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/ipc/dict.go @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
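The dictMemo defined just below owns the dictionary arrays it tracks by reference counting: Add retains the array, so the caller may release its own reference immediately (exactly what readSchema in file_reader.go, later in this diff, does), and delete releases everything the memo still holds. A short in-package sketch of that contract; memoOwnership is an illustrative name:

    package ipc

    import "github.com/apache/arrow/go/arrow/array"

    // memoOwnership walks through the memo's ref-counting contract.
    func memoOwnership(id int64, dict array.Interface) {
        memo := newMemo()
        memo.Add(id, dict) // Add retains dict (and panics on a duplicate id)
        dict.Release()     // drop the caller's reference; the memo keeps dict alive
        if d, ok := memo.Dict(id); ok {
            _ = d // valid for as long as the memo holds it
        }
        memo.delete() // releases the memo's references
    }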
+ +package ipc // import "github.com/apache/arrow/go/arrow/ipc" + +import ( + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/array" + "golang.org/x/xerrors" +) + +type dictMap map[int64]array.Interface +type dictTypeMap map[int64]arrow.Field + +type dictMemo struct { + dict2id map[array.Interface]int64 + id2dict dictMap // map of dictionary ID to dictionary array +} + +func newMemo() dictMemo { + return dictMemo{ + dict2id: make(map[array.Interface]int64), + id2dict: make(dictMap), + } +} + +func (memo *dictMemo) Len() int { return len(memo.id2dict) } + +func (memo *dictMemo) delete() { + for id, v := range memo.id2dict { + delete(memo.id2dict, id) + delete(memo.dict2id, v) + v.Release() + } +} + +func (memo dictMemo) Dict(id int64) (array.Interface, bool) { + v, ok := memo.id2dict[id] + return v, ok +} + +func (memo *dictMemo) ID(v array.Interface) int64 { + id, ok := memo.dict2id[v] + if ok { + return id + } + + v.Retain() + id = int64(len(memo.dict2id)) + memo.dict2id[v] = id + memo.id2dict[id] = v + return id +} + +func (memo dictMemo) HasDict(v array.Interface) bool { + _, ok := memo.dict2id[v] + return ok +} + +func (memo dictMemo) HasID(id int64) bool { + _, ok := memo.id2dict[id] + return ok +} + +func (memo *dictMemo) Add(id int64, v array.Interface) { + if _, dup := memo.id2dict[id]; dup { + panic(xerrors.Errorf("arrow/ipc: duplicate id=%d", id)) + } + v.Retain() + memo.id2dict[id] = v + memo.dict2id[v] = id +} diff --git a/vendor/github.com/apache/arrow/go/arrow/ipc/file_reader.go b/vendor/github.com/apache/arrow/go/arrow/ipc/file_reader.go new file mode 100644 index 000000000000..b7509d0c4799 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/ipc/file_reader.go @@ -0,0 +1,570 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc // import "github.com/apache/arrow/go/arrow/ipc" + +import ( + "bytes" + "encoding/binary" + "io" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/array" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/flatbuf" + "github.com/apache/arrow/go/arrow/memory" + "golang.org/x/xerrors" +) + +// FileReader is an Arrow file reader. +type FileReader struct { + r ReadAtSeeker + + footer struct { + offset int64 + buffer *memory.Buffer + data *flatbuf.Footer + } + + fields dictTypeMap + memo dictMemo + + schema *arrow.Schema + record array.Record + + irec int // current record index. used for the arrio.Reader interface + err error // last error +} + +// NewFileReader opens an Arrow file using the provided reader r. +func NewFileReader(r ReadAtSeeker, opts ...Option) (*FileReader, error) { + var ( + cfg = newConfig(opts...) 
+ err error + + f = FileReader{ + r: r, + fields: make(dictTypeMap), + memo: newMemo(), + } + ) + + if cfg.footer.offset <= 0 { + cfg.footer.offset, err = f.r.Seek(0, io.SeekEnd) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not retrieve footer offset: %w", err) + } + } + f.footer.offset = cfg.footer.offset + + err = f.readFooter() + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not decode footer: %w", err) + } + + err = f.readSchema() + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not decode schema: %w", err) + } + + if cfg.schema != nil && !cfg.schema.Equal(f.schema) { + return nil, xerrors.Errorf("arrow/ipc: inconsistent schema for reading (got: %v, want: %v)", f.schema, cfg.schema) + } + + return &f, err +} + +func (f *FileReader) readFooter() error { + var err error + + if f.footer.offset <= int64(len(Magic)*2+4) { + return xerrors.Errorf("arrow/ipc: file too small (size=%d)", f.footer.offset) + } + + eof := int64(len(Magic) + 4) + buf := make([]byte, eof) + n, err := f.r.ReadAt(buf, f.footer.offset-eof) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not read footer: %w", err) + } + if n != len(buf) { + return xerrors.Errorf("arrow/ipc: could not read %d bytes from end of file", len(buf)) + } + + if !bytes.Equal(buf[4:], Magic) { + return errNotArrowFile + } + + size := int64(binary.LittleEndian.Uint32(buf[:4])) + if size <= 0 || size+int64(len(Magic)*2+4) > f.footer.offset { + return errInconsistentFileMetadata + } + + buf = make([]byte, size) + n, err = f.r.ReadAt(buf, f.footer.offset-size-eof) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not read footer data: %w", err) + } + if n != len(buf) { + return xerrors.Errorf("arrow/ipc: could not read %d bytes from footer data", len(buf)) + } + + f.footer.buffer = memory.NewBufferBytes(buf) + f.footer.data = flatbuf.GetRootAsFooter(buf, 0) + return err +} + +func (f *FileReader) readSchema() error { + var err error + f.fields, err = dictTypesFromFB(f.footer.data.Schema(nil)) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not load dictionary types from file: %w", err) + } + + for i := 0; i < f.NumDictionaries(); i++ { + blk, err := f.dict(i) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not read dictionary[%d]: %w", i, err) + } + switch { + case !bitutil.IsMultipleOf8(blk.Offset): + return xerrors.Errorf("arrow/ipc: invalid file offset=%d for dictionary %d", blk.Offset, i) + case !bitutil.IsMultipleOf8(int64(blk.Meta)): + return xerrors.Errorf("arrow/ipc: invalid file metadata=%d position for dictionary %d", blk.Meta, i) + case !bitutil.IsMultipleOf8(blk.Body): + return xerrors.Errorf("arrow/ipc: invalid file body=%d position for dictionary %d", blk.Body, i) + } + + msg, err := blk.NewMessage() + if err != nil { + return err + } + defer msg.Release() + + id, dict, err := readDictionary(msg.meta, f.fields, f.r) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not read dictionary %d from file: %w", i, err) + } + f.memo.Add(id, dict) + dict.Release() // memo.Add increases ref-count of dict. 
+ } + + schema := f.footer.data.Schema(nil) + if schema == nil { + return xerrors.Errorf("arrow/ipc: could not load schema from flatbuffer data") + } + f.schema, err = schemaFromFB(schema, &f.memo) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not read schema: %w", err) + } + + return err +} + +func (f *FileReader) block(i int) (fileBlock, error) { + var blk flatbuf.Block + if !f.footer.data.RecordBatches(&blk, i) { + return fileBlock{}, xerrors.Errorf("arrow/ipc: could not extract file block %d", i) + } + + return fileBlock{ + Offset: blk.Offset(), + Meta: blk.MetaDataLength(), + Body: blk.BodyLength(), + r: f.r, + }, nil +} + +func (f *FileReader) dict(i int) (fileBlock, error) { + var blk flatbuf.Block + if !f.footer.data.Dictionaries(&blk, i) { + return fileBlock{}, xerrors.Errorf("arrow/ipc: could not extract dictionary block %d", i) + } + + return fileBlock{ + Offset: blk.Offset(), + Meta: blk.MetaDataLength(), + Body: blk.BodyLength(), + r: f.r, + }, nil +} + +func (f *FileReader) Schema() *arrow.Schema { + return f.schema +} + +func (f *FileReader) NumDictionaries() int { + if f.footer.data == nil { + return 0 + } + return f.footer.data.DictionariesLength() +} + +func (f *FileReader) NumRecords() int { + return f.footer.data.RecordBatchesLength() +} + +func (f *FileReader) Version() MetadataVersion { + return MetadataVersion(f.footer.data.Version()) +} + +// Close cleans up resources used by the File. +// Close does not close the underlying reader. +func (f *FileReader) Close() error { + if f.footer.data != nil { + f.footer.data = nil + } + + if f.footer.buffer != nil { + f.footer.buffer.Release() + f.footer.buffer = nil + } + + if f.record != nil { + f.record.Release() + f.record = nil + } + return nil +} + +// Record returns the i-th record from the file. +// The returned value is valid until the next call to Record. +// Users need to call Retain on that Record to keep it valid for longer. +func (f *FileReader) Record(i int) (array.Record, error) { + if i < 0 || i > f.NumRecords() { + panic("arrow/ipc: record index out of bounds") + } + + blk, err := f.block(i) + if err != nil { + return nil, err + } + switch { + case !bitutil.IsMultipleOf8(blk.Offset): + return nil, xerrors.Errorf("arrow/ipc: invalid file offset=%d for record %d", blk.Offset, i) + case !bitutil.IsMultipleOf8(int64(blk.Meta)): + return nil, xerrors.Errorf("arrow/ipc: invalid file metadata=%d position for record %d", blk.Meta, i) + case !bitutil.IsMultipleOf8(blk.Body): + return nil, xerrors.Errorf("arrow/ipc: invalid file body=%d position for record %d", blk.Body, i) + } + + msg, err := blk.NewMessage() + if err != nil { + return nil, err + } + defer msg.Release() + + if msg.Type() != MessageRecordBatch { + return nil, xerrors.Errorf("arrow/ipc: message %d is not a Record", i) + } + + if f.record != nil { + f.record.Release() + } + + f.record = newRecord(f.schema, msg.meta, bytes.NewReader(msg.body.Bytes())) + return f.record, nil +} + +// Read reads the current record from the underlying stream and an error, if any. +// When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF). +// +// The returned record value is valid until the next call to Read. +// Users need to call Retain on that Record to keep it valid for longer. 
+func (f *FileReader) Read() (rec array.Record, err error) { + if f.irec == f.NumRecords() { + return nil, io.EOF + } + rec, f.err = f.Record(f.irec) + f.irec++ + return rec, f.err +} + +// ReadAt reads the i-th record from the underlying stream and an error, if any. +func (f *FileReader) ReadAt(i int64) (array.Record, error) { + return f.Record(int(i)) +} + +func newRecord(schema *arrow.Schema, meta *memory.Buffer, body ReadAtSeeker) array.Record { + var ( + msg = flatbuf.GetRootAsMessage(meta.Bytes(), 0) + md flatbuf.RecordBatch + ) + initFB(&md, msg.Header) + rows := md.Length() + + ctx := &arrayLoaderContext{ + src: ipcSource{ + meta: &md, + r: body, + }, + max: kMaxNestingDepth, + } + + cols := make([]array.Interface, len(schema.Fields())) + for i, field := range schema.Fields() { + cols[i] = ctx.loadArray(field.Type) + } + + return array.NewRecord(schema, cols, rows) +} + +type ipcSource struct { + meta *flatbuf.RecordBatch + r ReadAtSeeker +} + +func (src *ipcSource) buffer(i int) *memory.Buffer { + var buf flatbuf.Buffer + if !src.meta.Buffers(&buf, i) { + panic("buffer index out of bound") + } + if buf.Length() == 0 { + return memory.NewBufferBytes(nil) + } + + raw := make([]byte, buf.Length()) + _, err := src.r.ReadAt(raw, buf.Offset()) + if err != nil { + panic(err) + } + + return memory.NewBufferBytes(raw) +} + +func (src *ipcSource) fieldMetadata(i int) *flatbuf.FieldNode { + var node flatbuf.FieldNode + if !src.meta.Nodes(&node, i) { + panic("field metadata out of bound") + } + return &node +} + +type arrayLoaderContext struct { + src ipcSource + ifield int + ibuffer int + max int +} + +func (ctx *arrayLoaderContext) field() *flatbuf.FieldNode { + field := ctx.src.fieldMetadata(ctx.ifield) + ctx.ifield++ + return field +} + +func (ctx *arrayLoaderContext) buffer() *memory.Buffer { + buf := ctx.src.buffer(ctx.ibuffer) + ctx.ibuffer++ + return buf +} + +func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) array.Interface { + switch dt := dt.(type) { + case *arrow.NullType: + return ctx.loadNull() + + case *arrow.BooleanType, + *arrow.Int8Type, *arrow.Int16Type, *arrow.Int32Type, *arrow.Int64Type, + *arrow.Uint8Type, *arrow.Uint16Type, *arrow.Uint32Type, *arrow.Uint64Type, + *arrow.Float16Type, *arrow.Float32Type, *arrow.Float64Type, + *arrow.Decimal128Type, + *arrow.Time32Type, *arrow.Time64Type, + *arrow.TimestampType, + *arrow.Date32Type, *arrow.Date64Type, + *arrow.MonthIntervalType, *arrow.DayTimeIntervalType, + *arrow.DurationType: + return ctx.loadPrimitive(dt) + + case *arrow.BinaryType, *arrow.StringType: + return ctx.loadBinary(dt) + + case *arrow.FixedSizeBinaryType: + return ctx.loadFixedSizeBinary(dt) + + case *arrow.ListType: + return ctx.loadList(dt) + + case *arrow.FixedSizeListType: + return ctx.loadFixedSizeList(dt) + + case *arrow.StructType: + return ctx.loadStruct(dt) + + default: + panic(xerrors.Errorf("array type %T not handled yet", dt)) + } +} + +func (ctx *arrayLoaderContext) loadCommon(nbufs int) (*flatbuf.FieldNode, []*memory.Buffer) { + buffers := make([]*memory.Buffer, 0, nbufs) + field := ctx.field() + + var buf *memory.Buffer + switch field.NullCount() { + case 0: + ctx.ibuffer++ + default: + buf = ctx.buffer() + } + buffers = append(buffers, buf) + + return field, buffers +} + +func (ctx *arrayLoaderContext) loadChild(dt arrow.DataType) array.Interface { + if ctx.max == 0 { + panic("arrow/ipc: nested type limit reached") + } + ctx.max-- + sub := ctx.loadArray(dt) + ctx.max++ + return sub +} + +func (ctx *arrayLoaderContext) loadNull() 
array.Interface { + field := ctx.field() + data := array.NewData(arrow.Null, int(field.Length()), nil, nil, int(field.NullCount()), 0) + defer data.Release() + + return array.MakeFromData(data) +} + +func (ctx *arrayLoaderContext) loadPrimitive(dt arrow.DataType) array.Interface { + field, buffers := ctx.loadCommon(2) + + switch field.Length() { + case 0: + buffers = append(buffers, nil) + ctx.ibuffer++ + default: + buffers = append(buffers, ctx.buffer()) + } + + data := array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) + defer data.Release() + + return array.MakeFromData(data) +} + +func (ctx *arrayLoaderContext) loadBinary(dt arrow.DataType) array.Interface { + field, buffers := ctx.loadCommon(3) + buffers = append(buffers, ctx.buffer(), ctx.buffer()) + + data := array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) + defer data.Release() + + return array.MakeFromData(data) +} + +func (ctx *arrayLoaderContext) loadFixedSizeBinary(dt *arrow.FixedSizeBinaryType) array.Interface { + field, buffers := ctx.loadCommon(2) + buffers = append(buffers, ctx.buffer()) + + data := array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) + defer data.Release() + + return array.MakeFromData(data) +} + +func (ctx *arrayLoaderContext) loadList(dt *arrow.ListType) array.Interface { + field, buffers := ctx.loadCommon(2) + buffers = append(buffers, ctx.buffer()) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + data := array.NewData(dt, int(field.Length()), buffers, []*array.Data{sub.Data()}, int(field.NullCount()), 0) + defer data.Release() + + return array.NewListData(data) +} + +func (ctx *arrayLoaderContext) loadFixedSizeList(dt *arrow.FixedSizeListType) array.Interface { + field, buffers := ctx.loadCommon(1) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + data := array.NewData(dt, int(field.Length()), buffers, []*array.Data{sub.Data()}, int(field.NullCount()), 0) + defer data.Release() + + return array.NewFixedSizeListData(data) +} + +func (ctx *arrayLoaderContext) loadStruct(dt *arrow.StructType) array.Interface { + field, buffers := ctx.loadCommon(1) + + arrs := make([]array.Interface, len(dt.Fields())) + subs := make([]*array.Data, len(dt.Fields())) + for i, f := range dt.Fields() { + arrs[i] = ctx.loadChild(f.Type) + subs[i] = arrs[i].Data() + } + defer func() { + for i := range arrs { + arrs[i].Release() + } + }() + + data := array.NewData(dt, int(field.Length()), buffers, subs, int(field.NullCount()), 0) + defer data.Release() + + return array.NewStructData(data) +} + +func readDictionary(meta *memory.Buffer, types dictTypeMap, r ReadAtSeeker) (int64, array.Interface, error) { + // msg := flatbuf.GetRootAsMessage(meta.Bytes(), 0) + // var dictBatch flatbuf.DictionaryBatch + // initFB(&dictBatch, msg.Header) + // + // id := dictBatch.Id() + // v, ok := types[id] + // if !ok { + // return id, nil, errors.Errorf("arrow/ipc: no type metadata for dictionary with ID=%d", id) + // } + // + // fields := []arrow.Field{v} + // + // // we need a schema for the record batch. + // schema := arrow.NewSchema(fields, nil) + // + // // the dictionary is embedded in a record batch with a single column. 
+ // recBatch := dictBatch.Data(nil) + // + // var ( + // batchMeta *memory.Buffer + // body *memory.Buffer + // ) + // + // + // ctx := &arrayLoaderContext{ + // src: ipcSource{ + // meta: &md, + // r: bytes.NewReader(body.Bytes()), + // }, + // max: kMaxNestingDepth, + // } + // + // cols := make([]array.Interface, len(schema.Fields())) + // for i, field := range schema.Fields() { + // cols[i] = ctx.loadArray(field.Type) + // } + // + // batch := array.NewRecord(schema, cols, rows) + + panic("not implemented") +} diff --git a/vendor/github.com/apache/arrow/go/arrow/ipc/file_writer.go b/vendor/github.com/apache/arrow/go/arrow/ipc/file_writer.go new file mode 100644 index 000000000000..49e9594829ab --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/ipc/file_writer.go @@ -0,0 +1,333 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc // import "github.com/apache/arrow/go/arrow/ipc" + +import ( + "encoding/binary" + "io" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/array" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/internal/flatbuf" + "github.com/apache/arrow/go/arrow/memory" + "golang.org/x/xerrors" +) + +type payloadWriter interface { + start() error + write(payload) error + Close() error +} + +type pwriter struct { + w io.WriteSeeker + pos int64 + + schema *arrow.Schema + dicts []fileBlock + recs []fileBlock +} + +func (w *pwriter) start() error { + var err error + + err = w.updatePos() + if err != nil { + return xerrors.Errorf("arrow/ipc: could not update position while in start: %w", err) + } + + // only necessary to align to 8-byte boundary at the start of the file + _, err = w.Write(Magic) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not write magic Arrow bytes: %w", err) + } + + err = w.align(kArrowIPCAlignment) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not align start block: %w", err) + } + + return err +} + +func (w *pwriter) write(p payload) error { + blk := fileBlock{Offset: w.pos, Meta: 0, Body: p.size} + n, err := writeIPCPayload(w, p) + if err != nil { + return err + } + + blk.Meta = int32(n) + + err = w.updatePos() + if err != nil { + return xerrors.Errorf("arrow/ipc: could not update position while in write-payload: %w", err) + } + + switch byte(p.msg) { + case flatbuf.MessageHeaderDictionaryBatch: + w.dicts = append(w.dicts, blk) + case flatbuf.MessageHeaderRecordBatch: + w.recs = append(w.recs, blk) + } + + return nil +} + +func (w *pwriter) Close() error { + var err error + + // write file footer + err = w.updatePos() + if err != nil { + return xerrors.Errorf("arrow/ipc: could not update position while in close: %w", err) + } + + pos := w.pos + err = writeFileFooter(w.schema, w.dicts, w.recs, w) + if err != 
nil { + return xerrors.Errorf("arrow/ipc: could not write file footer: %w", err) + } + + // write file footer length + err = w.updatePos() // not strictly needed as we passed w to writeFileFooter... + if err != nil { + return xerrors.Errorf("arrow/ipc: could not compute file footer length: %w", err) + } + + size := w.pos - pos + if size <= 0 { + return xerrors.Errorf("arrow/ipc: invalid file footer size (size=%d)", size) + } + + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, uint32(size)) + _, err = w.Write(buf) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not write file footer size: %w", err) + } + + _, err = w.Write(Magic) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not write Arrow magic bytes: %w", err) + } + + return nil +} + +func (w *pwriter) updatePos() error { + var err error + w.pos, err = w.w.Seek(0, io.SeekCurrent) + return err +} + +func (w *pwriter) align(align int32) error { + remainder := paddedLength(w.pos, align) - w.pos + if remainder == 0 { + return nil + } + + _, err := w.Write(paddingBytes[:int(remainder)]) + return err +} + +func (w *pwriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.pos += int64(n) + return n, err +} + +func writeIPCPayload(w io.Writer, p payload) (int, error) { + n, err := writeMessage(p.meta, kArrowIPCAlignment, w) + if err != nil { + return n, err + } + + // now write the buffers + for _, buf := range p.body { + var ( + size int64 + padding int64 + ) + + // the buffer might be null if we are handling zero row lengths. + if buf != nil { + size = int64(buf.Len()) + padding = bitutil.CeilByte64(size) - size + } + + if size > 0 { + _, err = w.Write(buf.Bytes()) + if err != nil { + return n, xerrors.Errorf("arrow/ipc: could not write payload message body: %w", err) + } + } + + if padding > 0 { + _, err = w.Write(paddingBytes[:padding]) + if err != nil { + return n, xerrors.Errorf("arrow/ipc: could not write payload message padding: %w", err) + } + } + } + + return n, err +} + +type payload struct { + msg MessageType + meta *memory.Buffer + body []*memory.Buffer + size int64 // length of body +} + +func (p *payload) Release() { + if p.meta != nil { + p.meta.Release() + p.meta = nil + } + for i, b := range p.body { + if b == nil { + continue + } + b.Release() + p.body[i] = nil + } +} + +type payloads []payload + +func (ps payloads) Release() { + for i := range ps { + ps[i].Release() + } +} + +// FileWriter is an Arrow file writer. +type FileWriter struct { + w io.WriteSeeker + + mem memory.Allocator + + header struct { + started bool + offset int64 + } + + footer struct { + written bool + } + + pw payloadWriter + + schema *arrow.Schema +} + +// NewFileWriter opens an Arrow file using the provided writer w. +func NewFileWriter(w io.WriteSeeker, opts ...Option) (*FileWriter, error) { + var ( + cfg = newConfig(opts...) 
+ err error + ) + + f := FileWriter{ + w: w, + pw: &pwriter{w: w, schema: cfg.schema, pos: -1}, + mem: cfg.alloc, + schema: cfg.schema, + } + + pos, err := f.w.Seek(0, io.SeekCurrent) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not seek current position: %w", err) + } + f.header.offset = pos + + return &f, err +} + +func (f *FileWriter) Close() error { + err := f.checkStarted() + if err != nil { + return xerrors.Errorf("arrow/ipc: could not write empty file: %w", err) + } + + if f.footer.written { + return nil + } + + err = f.pw.Close() + if err != nil { + return xerrors.Errorf("arrow/ipc: could not close payload writer: %w", err) + } + f.footer.written = true + + return nil +} + +func (f *FileWriter) Write(rec array.Record) error { + schema := rec.Schema() + if schema == nil || !schema.Equal(f.schema) { + return errInconsistentSchema + } + + if err := f.checkStarted(); err != nil { + return xerrors.Errorf("arrow/ipc: could not write header: %w", err) + } + + const allow64b = true + var ( + data = payload{msg: MessageRecordBatch} + enc = newRecordEncoder(f.mem, 0, kMaxNestingDepth, allow64b) + ) + defer data.Release() + + if err := enc.Encode(&data, rec); err != nil { + return xerrors.Errorf("arrow/ipc: could not encode record to payload: %w", err) + } + + return f.pw.write(data) +} + +func (f *FileWriter) checkStarted() error { + if !f.header.started { + return f.start() + } + return nil +} + +func (f *FileWriter) start() error { + f.header.started = true + err := f.pw.start() + if err != nil { + return err + } + + // write out schema payloads + ps := payloadsFromSchema(f.schema, f.mem, nil) + defer ps.Release() + + for _, data := range ps { + err = f.pw.write(data) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/apache/arrow/go/arrow/ipc/ipc.go b/vendor/github.com/apache/arrow/go/arrow/ipc/ipc.go new file mode 100644 index 000000000000..4c0eb4b61e61 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/ipc/ipc.go @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
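Putting the writer side together: a minimal usage sketch for the FileWriter above, written from a caller outside this package. It relies only on NewFileWriter, Write, and Close, plus the WithSchema and WithAllocator options defined just below in ipc.go; the writeRecords helper, its package name, and the path parameter are illustrative:

    package example

    import (
        "os"

        "github.com/apache/arrow/go/arrow"
        "github.com/apache/arrow/go/arrow/array"
        "github.com/apache/arrow/go/arrow/ipc"
        "github.com/apache/arrow/go/arrow/memory"
    )

    // writeRecords writes records sharing one schema into an Arrow file.
    // The magic bytes and schema payloads are emitted lazily on the first
    // Write; Close writes the footer, its length, and the trailing magic.
    func writeRecords(path string, schema *arrow.Schema, recs []array.Record) error {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()

        w, err := ipc.NewFileWriter(f, ipc.WithSchema(schema), ipc.WithAllocator(memory.NewGoAllocator()))
        if err != nil {
            return err
        }
        for _, rec := range recs {
            if err := w.Write(rec); err != nil { // errInconsistentSchema on mismatch
                return err
            }
        }
        return w.Close()
    }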
+ +package ipc // import "github.com/apache/arrow/go/arrow/ipc" + +import ( + "io" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/arrio" + "github.com/apache/arrow/go/arrow/memory" +) + +const ( + errNotArrowFile = errString("arrow/ipc: not an Arrow file") + errInconsistentFileMetadata = errString("arrow/ipc: file is smaller than indicated metadata size") + errInconsistentSchema = errString("arrow/ipc: tried to write record batch with different schema") + errMaxRecursion = errString("arrow/ipc: max recursion depth reached") + errBigArray = errString("arrow/ipc: array larger than 2^31-1 in length") + + kArrowAlignment = 64 // buffers are padded to 64b boundaries (for SIMD) + kTensorAlignment = 64 // tensors are padded to 64b boundaries + kArrowIPCAlignment = 8 // align on 8b boundaries in IPC +) + +var ( + paddingBytes [kArrowAlignment]byte + kEOS = [8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0, 0, 0, 0} // end of stream message + kIPCContToken uint32 = 0xFFFFFFFF // 32b continuation indicator for FlatBuffers 8b alignment +) + +func paddedLength(nbytes int64, alignment int32) int64 { + align := int64(alignment) + return ((nbytes + align - 1) / align) * align +} + +type errString string + +func (s errString) Error() string { + return string(s) +} + +type ReadAtSeeker interface { + io.Reader + io.Seeker + io.ReaderAt +} + +type config struct { + alloc memory.Allocator + schema *arrow.Schema + footer struct { + offset int64 + } +} + +func newConfig(opts ...Option) *config { + cfg := &config{ + alloc: memory.NewGoAllocator(), + } + + for _, opt := range opts { + opt(cfg) + } + + return cfg +} + +// Option is a functional option to configure opening or creating Arrow files +// and streams. +type Option func(*config) + +// WithFooterOffset specifies the Arrow footer position in bytes. +func WithFooterOffset(offset int64) Option { + return func(cfg *config) { + cfg.footer.offset = offset + } +} + +// WithAllocator specifies the Arrow memory allocator used while building records. +func WithAllocator(mem memory.Allocator) Option { + return func(cfg *config) { + cfg.alloc = mem + } +} + +// WithSchema specifies the Arrow schema to be used for reading or writing. +func WithSchema(schema *arrow.Schema) Option { + return func(cfg *config) { + cfg.schema = schema + } +} + +var ( + _ arrio.Reader = (*Reader)(nil) + _ arrio.Writer = (*Writer)(nil) + _ arrio.Reader = (*FileReader)(nil) + _ arrio.Writer = (*FileWriter)(nil) + + _ arrio.ReaderAt = (*FileReader)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/ipc/message.go b/vendor/github.com/apache/arrow/go/arrow/ipc/message.go new file mode 100644 index 000000000000..0e7b6e66611c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/ipc/message.go @@ -0,0 +1,234 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc // import "github.com/apache/arrow/go/arrow/ipc" + +import ( + "encoding/binary" + "fmt" + "io" + "sync/atomic" + + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/internal/flatbuf" + "github.com/apache/arrow/go/arrow/memory" + "golang.org/x/xerrors" +) + +// MetadataVersion represents the Arrow metadata version. +type MetadataVersion flatbuf.MetadataVersion + +const ( + MetadataV1 = MetadataVersion(flatbuf.MetadataVersionV1) // version for Arrow-0.1.0 + MetadataV2 = MetadataVersion(flatbuf.MetadataVersionV2) // version for Arrow-0.2.0 + MetadataV3 = MetadataVersion(flatbuf.MetadataVersionV3) // version for Arrow-0.3.0 to 0.7.1 + MetadataV4 = MetadataVersion(flatbuf.MetadataVersionV4) // version for >= Arrow-0.8.0 +) + +func (m MetadataVersion) String() string { + if v, ok := flatbuf.EnumNamesMetadataVersion[int16(m)]; ok { + return v + } + return fmt.Sprintf("MetadataVersion(%d)", int16(m)) +} + +// MessageType represents the type of Message in an Arrow format. +type MessageType flatbuf.MessageHeader + +const ( + MessageNone = MessageType(flatbuf.MessageHeaderNONE) + MessageSchema = MessageType(flatbuf.MessageHeaderSchema) + MessageDictionaryBatch = MessageType(flatbuf.MessageHeaderDictionaryBatch) + MessageRecordBatch = MessageType(flatbuf.MessageHeaderRecordBatch) + MessageTensor = MessageType(flatbuf.MessageHeaderTensor) + MessageSparseTensor = MessageType(flatbuf.MessageHeaderSparseTensor) +) + +func (m MessageType) String() string { + if v, ok := flatbuf.EnumNamesMessageHeader[byte(m)]; ok { + return v + } + return fmt.Sprintf("MessageType(%d)", int(m)) +} + +const ( + // maxNestingDepth is an arbitrary value to catch user mistakes. + // For deeply nested schemas, it is expected the user will indicate + // explicitly the maximum allowed recursion depth. + maxNestingDepth = 64 +) + +// Message is an IPC message, including metadata and body. +type Message struct { + refCount int64 + msg *flatbuf.Message + meta *memory.Buffer + body *memory.Buffer +} + +// NewMessage creates a new message from the metadata and body buffers. +// NewMessage panics if any of these buffers is nil. +func NewMessage(meta, body *memory.Buffer) *Message { + if meta == nil || body == nil { + panic("arrow/ipc: nil buffers") + } + meta.Retain() + body.Retain() + return &Message{ + refCount: 1, + msg: flatbuf.GetRootAsMessage(meta.Bytes(), 0), + meta: meta, + body: body, + } +} + +func newMessageFromFB(meta *flatbuf.Message, body *memory.Buffer) *Message { + if meta == nil || body == nil { + panic("arrow/ipc: nil buffers") + } + body.Retain() + return &Message{ + refCount: 1, + msg: meta, + meta: memory.NewBufferBytes(meta.Table().Bytes), + body: body, + } +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (msg *Message) Retain() { + atomic.AddInt64(&msg.refCount, 1) +} + +// Release decreases the reference count by 1. +// Release may be called simultaneously from multiple goroutines. +// When the reference count goes to zero, the memory is freed. 
+func (msg *Message) Release() { + debug.Assert(atomic.LoadInt64(&msg.refCount) > 0, "too many releases") + + if atomic.AddInt64(&msg.refCount, -1) == 0 { + msg.meta.Release() + msg.body.Release() + msg.msg = nil + msg.meta = nil + msg.body = nil + } +} + +func (msg *Message) Version() MetadataVersion { + return MetadataVersion(msg.msg.Version()) +} + +func (msg *Message) Type() MessageType { + return MessageType(msg.msg.HeaderType()) +} + +func (msg *Message) BodyLen() int64 { + return msg.msg.BodyLength() +} + +// MessageReader reads messages from an io.Reader. +type MessageReader struct { + r io.Reader + + refCount int64 + msg *Message +} + +// NewMessageReader returns a reader that reads messages from an input stream. +func NewMessageReader(r io.Reader) *MessageReader { + return &MessageReader{r: r, refCount: 1} +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (r *MessageReader) Retain() { + atomic.AddInt64(&r.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (r *MessageReader) Release() { + debug.Assert(atomic.LoadInt64(&r.refCount) > 0, "too many releases") + + if atomic.AddInt64(&r.refCount, -1) == 0 { + if r.msg != nil { + r.msg.Release() + r.msg = nil + } + } +} + +// Message returns the current message that has been extracted from the +// underlying stream. +// It is valid until the next call to Message. +func (r *MessageReader) Message() (*Message, error) { + var buf = make([]byte, 4) + _, err := io.ReadFull(r.r, buf) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not read continuation indicator: %w", err) + } + var ( + cid = binary.LittleEndian.Uint32(buf) + msgLen int32 + ) + switch cid { + case 0: + // EOS message. + return nil, io.EOF // FIXME(sbinet): send nil instead? or a special EOS error? + case kIPCContToken: + _, err = io.ReadFull(r.r, buf) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not read message length: %w", err) + } + msgLen = int32(binary.LittleEndian.Uint32(buf)) + if msgLen == 0 { + // optional 0 EOS control message + return nil, io.EOF // FIXME(sbinet): send nil instead? or a special EOS error? + } + + default: + // ARROW-6314: backwards compatibility for reading old IPC + // messages produced prior to version 0.15.0 + msgLen = int32(cid) + } + + buf = make([]byte, msgLen) + _, err = io.ReadFull(r.r, buf) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not read message metadata: %w", err) + } + + meta := flatbuf.GetRootAsMessage(buf, 0) + bodyLen := meta.BodyLength() + + buf = make([]byte, bodyLen) + _, err = io.ReadFull(r.r, buf) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not read message body: %w", err) + } + body := memory.NewBufferBytes(buf) + + if r.msg != nil { + r.msg.Release() + r.msg = nil + } + r.msg = newMessageFromFB(meta, body) + + return r.msg, nil +} diff --git a/vendor/github.com/apache/arrow/go/arrow/ipc/metadata.go b/vendor/github.com/apache/arrow/go/arrow/ipc/metadata.go new file mode 100644 index 000000000000..687cbe0a0eca --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/ipc/metadata.go @@ -0,0 +1,1058 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc // import "github.com/apache/arrow/go/arrow/ipc" + +import ( + "encoding/binary" + "io" + "sort" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/internal/flatbuf" + "github.com/apache/arrow/go/arrow/memory" + flatbuffers "github.com/google/flatbuffers/go" + "golang.org/x/xerrors" +) + +// Magic string identifying an Apache Arrow file. +var Magic = []byte("ARROW1") + +const ( + currentMetadataVersion = MetadataV4 + minMetadataVersion = MetadataV4 + + kExtensionTypeKeyName = "arrow_extension_name" + kExtensionDataKeyName = "arrow_extension_data" + + // ARROW-109: We set this number arbitrarily to help catch user mistakes. For + // deeply nested schemas, it is expected the user will indicate explicitly the + // maximum allowed recursion depth + kMaxNestingDepth = 64 +) + +type startVecFunc func(b *flatbuffers.Builder, n int) flatbuffers.UOffsetT + +type fieldMetadata struct { + Len int64 + Nulls int64 + Offset int64 +} + +type bufferMetadata struct { + Offset int64 // relative offset into the memory page to the starting byte of the buffer + Len int64 // absolute length in bytes of the buffer +} + +type fileBlock struct { + Offset int64 + Meta int32 + Body int64 + + r io.ReaderAt +} + +func fileBlocksToFB(b *flatbuffers.Builder, blocks []fileBlock, start startVecFunc) flatbuffers.UOffsetT { + start(b, len(blocks)) + for i := len(blocks) - 1; i >= 0; i-- { + blk := blocks[i] + flatbuf.CreateBlock(b, blk.Offset, blk.Meta, blk.Body) + } + + return b.EndVector(len(blocks)) +} + +func (blk fileBlock) NewMessage() (*Message, error) { + var ( + err error + buf []byte + r = blk.section() + ) + + buf = make([]byte, blk.Meta) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not read message metadata: %w", err) + } + + prefix := 0 + switch binary.LittleEndian.Uint32(buf) { + case 0: + case kIPCContToken: + prefix = 8 + default: + // ARROW-6314: backwards compatibility for reading old IPC + // messages produced prior to version 0.15.0 + prefix = 4 + } + + meta := memory.NewBufferBytes(buf[prefix:]) // drop buf-size already known from blk.Meta + + buf = make([]byte, blk.Body) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not read message body: %w", err) + } + body := memory.NewBufferBytes(buf) + + return NewMessage(meta, body), nil +} + +func (blk fileBlock) section() io.Reader { + return io.NewSectionReader(blk.r, blk.Offset, int64(blk.Meta)+blk.Body) +} + +func unitFromFB(unit flatbuf.TimeUnit) arrow.TimeUnit { + switch unit { + case flatbuf.TimeUnitSECOND: + return arrow.Second + case flatbuf.TimeUnitMILLISECOND: + return arrow.Millisecond + case flatbuf.TimeUnitMICROSECOND: + return arrow.Microsecond + case flatbuf.TimeUnitNANOSECOND: + return arrow.Nanosecond + default: + panic(xerrors.Errorf("arrow/ipc: invalid flatbuf.TimeUnit(%d) value", unit)) + } +} + +func unitToFB(unit 
arrow.TimeUnit) flatbuf.TimeUnit { + switch unit { + case arrow.Second: + return flatbuf.TimeUnitSECOND + case arrow.Millisecond: + return flatbuf.TimeUnitMILLISECOND + case arrow.Microsecond: + return flatbuf.TimeUnitMICROSECOND + case arrow.Nanosecond: + return flatbuf.TimeUnitNANOSECOND + default: + panic(xerrors.Errorf("arrow/ipc: invalid arrow.TimeUnit(%d) value", unit)) + } +} + +// initFB is a helper function to handle flatbuffers' polymorphism. +func initFB(t interface { + Table() flatbuffers.Table + Init([]byte, flatbuffers.UOffsetT) +}, f func(tbl *flatbuffers.Table) bool) { + tbl := t.Table() + if !f(&tbl) { + panic(xerrors.Errorf("arrow/ipc: could not initialize %T from flatbuffer", t)) + } + t.Init(tbl.Bytes, tbl.Pos) +} + +func fieldFromFB(field *flatbuf.Field, memo *dictMemo) (arrow.Field, error) { + var ( + err error + o arrow.Field + ) + + o.Name = string(field.Name()) + o.Nullable = field.Nullable() + o.Metadata, err = metadataFromFB(field) + if err != nil { + return o, err + } + + encoding := field.Dictionary(nil) + switch encoding { + case nil: + n := field.ChildrenLength() + children := make([]arrow.Field, n) + for i := range children { + var childFB flatbuf.Field + if !field.Children(&childFB, i) { + return o, xerrors.Errorf("arrow/ipc: could not load field child %d", i) + } + child, err := fieldFromFB(&childFB, memo) + if err != nil { + return o, xerrors.Errorf("arrow/ipc: could not convert field child %d: %w", i, err) + } + children[i] = child + } + + o.Type, err = typeFromFB(field, children, o.Metadata) + if err != nil { + return o, xerrors.Errorf("arrow/ipc: could not convert field type: %w", err) + } + default: + panic("not implemented") // FIXME(sbinet) + } + + return o, nil +} + +func fieldToFB(b *flatbuffers.Builder, field arrow.Field, memo *dictMemo) flatbuffers.UOffsetT { + var visitor = fieldVisitor{b: b, memo: memo, meta: make(map[string]string)} + return visitor.result(field) +} + +type fieldVisitor struct { + b *flatbuffers.Builder + memo *dictMemo + dtype flatbuf.Type + offset flatbuffers.UOffsetT + kids []flatbuffers.UOffsetT + meta map[string]string +} + +func (fv *fieldVisitor) visit(field arrow.Field) { + dt := field.Type + switch dt := dt.(type) { + case *arrow.NullType: + fv.dtype = flatbuf.TypeNull + flatbuf.NullStart(fv.b) + fv.offset = flatbuf.NullEnd(fv.b) + + case *arrow.BooleanType: + fv.dtype = flatbuf.TypeBool + flatbuf.BoolStart(fv.b) + fv.offset = flatbuf.BoolEnd(fv.b) + + case *arrow.Uint8Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint16Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint32Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint64Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Int8Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int16Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int32Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int64Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Float16Type: + fv.dtype = flatbuf.TypeFloatingPoint + fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case *arrow.Float32Type: + fv.dtype = flatbuf.TypeFloatingPoint 
+ fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case *arrow.Float64Type: + fv.dtype = flatbuf.TypeFloatingPoint + fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case *arrow.Decimal128Type: + fv.dtype = flatbuf.TypeDecimal + flatbuf.DecimalStart(fv.b) + flatbuf.DecimalAddPrecision(fv.b, dt.Precision) + flatbuf.DecimalAddScale(fv.b, dt.Scale) + fv.offset = flatbuf.DecimalEnd(fv.b) + + case *arrow.FixedSizeBinaryType: + fv.dtype = flatbuf.TypeFixedSizeBinary + flatbuf.FixedSizeBinaryStart(fv.b) + flatbuf.FixedSizeBinaryAddByteWidth(fv.b, int32(dt.ByteWidth)) + fv.offset = flatbuf.FixedSizeBinaryEnd(fv.b) + + case *arrow.BinaryType: + fv.dtype = flatbuf.TypeBinary + flatbuf.BinaryStart(fv.b) + fv.offset = flatbuf.BinaryEnd(fv.b) + + case *arrow.StringType: + fv.dtype = flatbuf.TypeUtf8 + flatbuf.Utf8Start(fv.b) + fv.offset = flatbuf.Utf8End(fv.b) + + case *arrow.Date32Type: + fv.dtype = flatbuf.TypeDate + flatbuf.DateStart(fv.b) + flatbuf.DateAddUnit(fv.b, flatbuf.DateUnitDAY) + fv.offset = flatbuf.DateEnd(fv.b) + + case *arrow.Date64Type: + fv.dtype = flatbuf.TypeDate + flatbuf.DateStart(fv.b) + flatbuf.DateAddUnit(fv.b, flatbuf.DateUnitMILLISECOND) + fv.offset = flatbuf.DateEnd(fv.b) + + case *arrow.Time32Type: + fv.dtype = flatbuf.TypeTime + flatbuf.TimeStart(fv.b) + flatbuf.TimeAddUnit(fv.b, unitToFB(dt.Unit)) + flatbuf.TimeAddBitWidth(fv.b, 32) + fv.offset = flatbuf.TimeEnd(fv.b) + + case *arrow.Time64Type: + fv.dtype = flatbuf.TypeTime + flatbuf.TimeStart(fv.b) + flatbuf.TimeAddUnit(fv.b, unitToFB(dt.Unit)) + flatbuf.TimeAddBitWidth(fv.b, 64) + fv.offset = flatbuf.TimeEnd(fv.b) + + case *arrow.TimestampType: + fv.dtype = flatbuf.TypeTimestamp + unit := unitToFB(dt.Unit) + var tz flatbuffers.UOffsetT + if dt.TimeZone != "" { + tz = fv.b.CreateString(dt.TimeZone) + } + flatbuf.TimestampStart(fv.b) + flatbuf.TimestampAddUnit(fv.b, unit) + flatbuf.TimestampAddTimezone(fv.b, tz) + fv.offset = flatbuf.TimestampEnd(fv.b) + + case *arrow.StructType: + fv.dtype = flatbuf.TypeStruct_ + offsets := make([]flatbuffers.UOffsetT, len(dt.Fields())) + for i, field := range dt.Fields() { + offsets[i] = fieldToFB(fv.b, field, fv.memo) + } + flatbuf.Struct_Start(fv.b) + for i := len(offsets) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(offsets[i]) + } + fv.offset = flatbuf.Struct_End(fv.b) + fv.kids = append(fv.kids, offsets...) 
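+ // (flatbuffers builders write data back-to-front, so the child offsets are + // prepended in reverse to preserve declaration order in the finished vector.)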
+ + case *arrow.ListType: + fv.dtype = flatbuf.TypeList + fv.kids = append(fv.kids, fieldToFB(fv.b, arrow.Field{Name: "item", Type: dt.Elem(), Nullable: field.Nullable}, fv.memo)) + flatbuf.ListStart(fv.b) + fv.offset = flatbuf.ListEnd(fv.b) + + case *arrow.FixedSizeListType: + fv.dtype = flatbuf.TypeFixedSizeList + fv.kids = append(fv.kids, fieldToFB(fv.b, arrow.Field{Name: "item", Type: dt.Elem(), Nullable: field.Nullable}, fv.memo)) + flatbuf.FixedSizeListStart(fv.b) + flatbuf.FixedSizeListAddListSize(fv.b, dt.Len()) + fv.offset = flatbuf.FixedSizeListEnd(fv.b) + + case *arrow.MonthIntervalType: + fv.dtype = flatbuf.TypeInterval + flatbuf.IntervalStart(fv.b) + flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitYEAR_MONTH) + fv.offset = flatbuf.IntervalEnd(fv.b) + + case *arrow.DayTimeIntervalType: + fv.dtype = flatbuf.TypeInterval + flatbuf.IntervalStart(fv.b) + flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitDAY_TIME) + fv.offset = flatbuf.IntervalEnd(fv.b) + + case *arrow.DurationType: + fv.dtype = flatbuf.TypeDuration + unit := unitToFB(dt.Unit) + flatbuf.DurationStart(fv.b) + flatbuf.DurationAddUnit(fv.b, unit) + fv.offset = flatbuf.DurationEnd(fv.b) + + default: + err := xerrors.Errorf("arrow/ipc: invalid data type %v", dt) + panic(err) // FIXME(sbinet): implement all data-types. + } +} + +func (fv *fieldVisitor) result(field arrow.Field) flatbuffers.UOffsetT { + nameFB := fv.b.CreateString(field.Name) + + fv.visit(field) + + flatbuf.FieldStartChildrenVector(fv.b, len(fv.kids)) + for i := len(fv.kids) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(fv.kids[i]) + } + kidsFB := fv.b.EndVector(len(fv.kids)) + + var dictFB flatbuffers.UOffsetT + if field.Type.ID() == arrow.DICTIONARY { + panic("not implemented") // FIXME(sbinet) + } + + var ( + metaFB flatbuffers.UOffsetT + kvs []flatbuffers.UOffsetT + ) + for i, k := range field.Metadata.Keys() { + v := field.Metadata.Values()[i] + kk := fv.b.CreateString(k) + vv := fv.b.CreateString(v) + flatbuf.KeyValueStart(fv.b) + flatbuf.KeyValueAddKey(fv.b, kk) + flatbuf.KeyValueAddValue(fv.b, vv) + kvs = append(kvs, flatbuf.KeyValueEnd(fv.b)) + } + { + keys := make([]string, 0, len(fv.meta)) + for k := range fv.meta { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := fv.meta[k] + kk := fv.b.CreateString(k) + vv := fv.b.CreateString(v) + flatbuf.KeyValueStart(fv.b) + flatbuf.KeyValueAddKey(fv.b, kk) + flatbuf.KeyValueAddValue(fv.b, vv) + kvs = append(kvs, flatbuf.KeyValueEnd(fv.b)) + } + } + if len(kvs) > 0 { + flatbuf.FieldStartCustomMetadataVector(fv.b, len(kvs)) + for i := len(kvs) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(kvs[i]) + } + metaFB = fv.b.EndVector(len(kvs)) + } + + flatbuf.FieldStart(fv.b) + flatbuf.FieldAddName(fv.b, nameFB) + flatbuf.FieldAddNullable(fv.b, field.Nullable) + flatbuf.FieldAddTypeType(fv.b, fv.dtype) + flatbuf.FieldAddType(fv.b, fv.offset) + flatbuf.FieldAddDictionary(fv.b, dictFB) + flatbuf.FieldAddChildren(fv.b, kidsFB) + flatbuf.FieldAddCustomMetadata(fv.b, metaFB) + + offset := flatbuf.FieldEnd(fv.b) + + return offset +} + +func fieldFromFBDict(field *flatbuf.Field) (arrow.Field, error) { + var ( + o = arrow.Field{ + Name: string(field.Name()), + Nullable: field.Nullable(), + } + err error + memo = newMemo() + ) + + // any DictionaryEncoding set is ignored here. 
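+ // (the field is rebuilt with the dictionary's value type; the encoding itself + // is recorded separately in the dictTypeMap by visitField.)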
+ + kids := make([]arrow.Field, field.ChildrenLength()) + for i := range kids { + var kid flatbuf.Field + if !field.Children(&kid, i) { + return o, xerrors.Errorf("arrow/ipc: could not load field child %d", i) + } + kids[i], err = fieldFromFB(&kid, &memo) + if err != nil { + return o, xerrors.Errorf("arrow/ipc: field from dict: %w", err) + } + } + + meta, err := metadataFromFB(field) + if err != nil { + return o, xerrors.Errorf("arrow/ipc: metadata for field from dict: %w", err) + } + + o.Type, err = typeFromFB(field, kids, meta) + if err != nil { + return o, xerrors.Errorf("arrow/ipc: type for field from dict: %w", err) + } + + return o, nil +} + +func typeFromFB(field *flatbuf.Field, children []arrow.Field, md arrow.Metadata) (arrow.DataType, error) { + var data flatbuffers.Table + if !field.Type(&data) { + return nil, xerrors.Errorf("arrow/ipc: could not load field type data") + } + + dt, err := concreteTypeFromFB(field.TypeType(), data, children) + if err != nil { + return dt, err + } + + // look for extension metadata in custom metadata field. + if md.Len() > 0 { + i := md.FindKey(kExtensionTypeKeyName) + if i < 0 { + return dt, err + } + + panic("not implemented") // FIXME(sbinet) + } + + return dt, err +} + +func concreteTypeFromFB(typ flatbuf.Type, data flatbuffers.Table, children []arrow.Field) (arrow.DataType, error) { + var ( + dt arrow.DataType + err error + ) + + switch typ { + case flatbuf.TypeNONE: + return nil, xerrors.Errorf("arrow/ipc: Type metadata cannot be none") + + case flatbuf.TypeNull: + return arrow.Null, nil + + case flatbuf.TypeInt: + var dt flatbuf.Int + dt.Init(data.Bytes, data.Pos) + return intFromFB(dt) + + case flatbuf.TypeFloatingPoint: + var dt flatbuf.FloatingPoint + dt.Init(data.Bytes, data.Pos) + return floatFromFB(dt) + + case flatbuf.TypeDecimal: + var dt flatbuf.Decimal + dt.Init(data.Bytes, data.Pos) + return decimalFromFB(dt) + + case flatbuf.TypeBinary: + return arrow.BinaryTypes.Binary, nil + + case flatbuf.TypeFixedSizeBinary: + var dt flatbuf.FixedSizeBinary + dt.Init(data.Bytes, data.Pos) + return &arrow.FixedSizeBinaryType{ByteWidth: int(dt.ByteWidth())}, nil + + case flatbuf.TypeUtf8: + return arrow.BinaryTypes.String, nil + + case flatbuf.TypeBool: + return arrow.FixedWidthTypes.Boolean, nil + + case flatbuf.TypeList: + if len(children) != 1 { + return nil, xerrors.Errorf("arrow/ipc: List must have exactly 1 child field (got=%d)", len(children)) + } + return arrow.ListOf(children[0].Type), nil + + case flatbuf.TypeFixedSizeList: + var dt flatbuf.FixedSizeList + dt.Init(data.Bytes, data.Pos) + if len(children) != 1 { + return nil, xerrors.Errorf("arrow/ipc: FixedSizeList must have exactly 1 child field (got=%d)", len(children)) + } + return arrow.FixedSizeListOf(dt.ListSize(), children[0].Type), nil + + case flatbuf.TypeStruct_: + return arrow.StructOf(children...), nil + + case flatbuf.TypeTime: + var dt flatbuf.Time + dt.Init(data.Bytes, data.Pos) + return timeFromFB(dt) + + case flatbuf.TypeTimestamp: + var dt flatbuf.Timestamp + dt.Init(data.Bytes, data.Pos) + return timestampFromFB(dt) + + case flatbuf.TypeDate: + var dt flatbuf.Date + dt.Init(data.Bytes, data.Pos) + return dateFromFB(dt) + + case flatbuf.TypeInterval: + var dt flatbuf.Interval + dt.Init(data.Bytes, data.Pos) + return intervalFromFB(dt) + + case flatbuf.TypeDuration: + var dt flatbuf.Duration + dt.Init(data.Bytes, data.Pos) + return durationFromFB(dt) + + default: + // FIXME(sbinet): implement all the other types. 
+ panic(xerrors.Errorf("arrow/ipc: type %v not implemented", flatbuf.EnumNamesType[typ])) + } + + return dt, err +} + +func intFromFB(data flatbuf.Int) (arrow.DataType, error) { + bw := data.BitWidth() + if bw > 64 { + return nil, xerrors.Errorf("arrow/ipc: integers with more than 64 bits not implemented (bits=%d)", bw) + } + if bw < 8 { + return nil, xerrors.Errorf("arrow/ipc: integers with less than 8 bits not implemented (bits=%d)", bw) + } + + switch bw { + case 8: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint8, nil + } + return arrow.PrimitiveTypes.Int8, nil + + case 16: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint16, nil + } + return arrow.PrimitiveTypes.Int16, nil + + case 32: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint32, nil + } + return arrow.PrimitiveTypes.Int32, nil + + case 64: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint64, nil + } + return arrow.PrimitiveTypes.Int64, nil + default: + return nil, xerrors.Errorf("arrow/ipc: integers not in cstdint are not implemented") + } +} + +func intToFB(b *flatbuffers.Builder, bw int32, isSigned bool) flatbuffers.UOffsetT { + flatbuf.IntStart(b) + flatbuf.IntAddBitWidth(b, bw) + flatbuf.IntAddIsSigned(b, isSigned) + return flatbuf.IntEnd(b) +} + +func floatFromFB(data flatbuf.FloatingPoint) (arrow.DataType, error) { + switch p := data.Precision(); p { + case flatbuf.PrecisionHALF: + return arrow.FixedWidthTypes.Float16, nil + case flatbuf.PrecisionSINGLE: + return arrow.PrimitiveTypes.Float32, nil + case flatbuf.PrecisionDOUBLE: + return arrow.PrimitiveTypes.Float64, nil + default: + return nil, xerrors.Errorf("arrow/ipc: floating point type with %d precision not implemented", p) + } +} + +func floatToFB(b *flatbuffers.Builder, bw int32) flatbuffers.UOffsetT { + switch bw { + case 16: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionHALF) + return flatbuf.FloatingPointEnd(b) + case 32: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionSINGLE) + return flatbuf.FloatingPointEnd(b) + case 64: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionDOUBLE) + return flatbuf.FloatingPointEnd(b) + default: + panic(xerrors.Errorf("arrow/ipc: invalid floating point precision %d-bits", bw)) + } +} + +func decimalFromFB(data flatbuf.Decimal) (arrow.DataType, error) { + return &arrow.Decimal128Type{Precision: data.Precision(), Scale: data.Scale()}, nil +} + +func timeFromFB(data flatbuf.Time) (arrow.DataType, error) { + bw := data.BitWidth() + unit := unitFromFB(data.Unit()) + + switch bw { + case 32: + switch unit { + case arrow.Millisecond: + return arrow.FixedWidthTypes.Time32ms, nil + case arrow.Second: + return arrow.FixedWidthTypes.Time32s, nil + default: + return nil, xerrors.Errorf("arrow/ipc: Time32 type with %v unit not implemented", unit) + } + case 64: + switch unit { + case arrow.Nanosecond: + return arrow.FixedWidthTypes.Time64ns, nil + case arrow.Microsecond: + return arrow.FixedWidthTypes.Time64us, nil + default: + return nil, xerrors.Errorf("arrow/ipc: Time64 type with %v unit not implemented", unit) + } + default: + return nil, xerrors.Errorf("arrow/ipc: Time type with %d bitwidth not implemented", bw) + } +} + +func timestampFromFB(data flatbuf.Timestamp) (arrow.DataType, error) { + unit := unitFromFB(data.Unit()) + tz := string(data.Timezone()) + return &arrow.TimestampType{Unit: unit, TimeZone: tz}, nil +} + +func dateFromFB(data flatbuf.Date) (arrow.DataType, error) 
{ + switch data.Unit() { + case flatbuf.DateUnitDAY: + return arrow.FixedWidthTypes.Date32, nil + case flatbuf.DateUnitMILLISECOND: + return arrow.FixedWidthTypes.Date64, nil + } + return nil, xerrors.Errorf("arrow/ipc: Date type with %d unit not implemented", data.Unit()) +} + +func intervalFromFB(data flatbuf.Interval) (arrow.DataType, error) { + switch data.Unit() { + case flatbuf.IntervalUnitYEAR_MONTH: + return arrow.FixedWidthTypes.MonthInterval, nil + case flatbuf.IntervalUnitDAY_TIME: + return arrow.FixedWidthTypes.DayTimeInterval, nil + } + return nil, xerrors.Errorf("arrow/ipc: Interval type with %d unit not implemented", data.Unit()) +} + +func durationFromFB(data flatbuf.Duration) (arrow.DataType, error) { + switch data.Unit() { + case flatbuf.TimeUnitSECOND: + return arrow.FixedWidthTypes.Duration_s, nil + case flatbuf.TimeUnitMILLISECOND: + return arrow.FixedWidthTypes.Duration_ms, nil + case flatbuf.TimeUnitMICROSECOND: + return arrow.FixedWidthTypes.Duration_us, nil + case flatbuf.TimeUnitNANOSECOND: + return arrow.FixedWidthTypes.Duration_ns, nil + } + return nil, xerrors.Errorf("arrow/ipc: Duration type with %d unit not implemented", data.Unit()) +} + +type customMetadataer interface { + CustomMetadataLength() int + CustomMetadata(*flatbuf.KeyValue, int) bool +} + +func metadataFromFB(md customMetadataer) (arrow.Metadata, error) { + var ( + keys = make([]string, md.CustomMetadataLength()) + vals = make([]string, md.CustomMetadataLength()) + ) + + for i := range keys { + var kv flatbuf.KeyValue + if !md.CustomMetadata(&kv, i) { + return arrow.Metadata{}, xerrors.Errorf("arrow/ipc: could not read key-value %d from flatbuffer", i) + } + keys[i] = string(kv.Key()) + vals[i] = string(kv.Value()) + } + + return arrow.NewMetadata(keys, vals), nil +} + +func metadataToFB(b *flatbuffers.Builder, meta arrow.Metadata, start startVecFunc) flatbuffers.UOffsetT { + if meta.Len() == 0 { + return 0 + } + + n := meta.Len() + kvs := make([]flatbuffers.UOffsetT, n) + for i := range kvs { + k := b.CreateString(meta.Keys()[i]) + v := b.CreateString(meta.Values()[i]) + flatbuf.KeyValueStart(b) + flatbuf.KeyValueAddKey(b, k) + flatbuf.KeyValueAddValue(b, v) + kvs[i] = flatbuf.KeyValueEnd(b) + } + + start(b, n) + for i := n - 1; i >= 0; i-- { + b.PrependUOffsetT(kvs[i]) + } + return b.EndVector(n) +} + +func schemaFromFB(schema *flatbuf.Schema, memo *dictMemo) (*arrow.Schema, error) { + var ( + err error + fields = make([]arrow.Field, schema.FieldsLength()) + ) + + for i := range fields { + var field flatbuf.Field + if !schema.Fields(&field, i) { + return nil, xerrors.Errorf("arrow/ipc: could not read field %d from schema", i) + } + + fields[i], err = fieldFromFB(&field, memo) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not convert field %d from flatbuf: %w", i, err) + } + } + + md, err := metadataFromFB(schema) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not convert schema metadata from flatbuf: %w", err) + } + + return arrow.NewSchema(fields, &md), nil +} + +func schemaToFB(b *flatbuffers.Builder, schema *arrow.Schema, memo *dictMemo) flatbuffers.UOffsetT { + fields := make([]flatbuffers.UOffsetT, len(schema.Fields())) + for i, field := range schema.Fields() { + fields[i] = fieldToFB(b, field, memo) + } + + flatbuf.SchemaStartFieldsVector(b, len(fields)) + for i := len(fields) - 1; i >= 0; i-- { + b.PrependUOffsetT(fields[i]) + } + fieldsFB := b.EndVector(len(fields)) + + metaFB := metadataToFB(b, schema.Metadata(), 
flatbuf.SchemaStartCustomMetadataVector) + + flatbuf.SchemaStart(b) + flatbuf.SchemaAddEndianness(b, flatbuf.EndiannessLittle) + flatbuf.SchemaAddFields(b, fieldsFB) + flatbuf.SchemaAddCustomMetadata(b, metaFB) + offset := flatbuf.SchemaEnd(b) + + return offset +} + +func dictTypesFromFB(schema *flatbuf.Schema) (dictTypeMap, error) { + var ( + err error + fields = make(dictTypeMap, schema.FieldsLength()) + ) + for i := 0; i < schema.FieldsLength(); i++ { + var field flatbuf.Field + if !schema.Fields(&field, i) { + return nil, xerrors.Errorf("arrow/ipc: could not load field %d from schema", i) + } + fields, err = visitField(&field, fields) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not visit field %d from schema: %w", i, err) + } + } + return fields, err +} + +func visitField(field *flatbuf.Field, dict dictTypeMap) (dictTypeMap, error) { + var err error + meta := field.Dictionary(nil) + switch meta { + case nil: + // field is not dictionary encoded. + // => visit children. + for i := 0; i < field.ChildrenLength(); i++ { + var child flatbuf.Field + if !field.Children(&child, i) { + return nil, xerrors.Errorf("arrow/ipc: could not visit child %d from field", i) + } + dict, err = visitField(&child, dict) + if err != nil { + return nil, err + } + } + default: + // field is dictionary encoded. + // construct the data type for the dictionary: no descendants can be dict-encoded. + dfield, err := fieldFromFBDict(field) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not create data type for dictionary: %w", err) + } + dict[meta.Id()] = dfield + } + return dict, err +} + +// payloadsFromSchema returns a slice of payloads corresponding to the given schema. +// Callers of payloadsFromSchema will need to call Release after use. +func payloadsFromSchema(schema *arrow.Schema, mem memory.Allocator, memo *dictMemo) payloads { + dict := newMemo() + + ps := make(payloads, 1, dict.Len()+1) + ps[0].msg = MessageSchema + ps[0].meta = writeSchemaMessage(schema, mem, &dict) + + // append dictionaries. 
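+ // (in the IPC stream format, one DictionaryBatch message per dictionary would + // follow the Schema message here; emitting them is not implemented yet, hence + // the panic below.)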
+ if dict.Len() > 0 { + panic("payloads-from-schema: not-implemented") + // for id, arr := range dict.id2dict { + // // GetSchemaPayloads: writer.cc:535 + // } + } + + if memo != nil { + *memo = dict + } + + return ps +} + +func writeFBBuilder(b *flatbuffers.Builder, mem memory.Allocator) *memory.Buffer { + raw := b.FinishedBytes() + buf := memory.NewResizableBuffer(mem) + buf.Resize(len(raw)) + copy(buf.Bytes(), raw) + return buf +} + +func writeMessageFB(b *flatbuffers.Builder, mem memory.Allocator, hdrType flatbuf.MessageHeader, hdr flatbuffers.UOffsetT, bodyLen int64) *memory.Buffer { + + flatbuf.MessageStart(b) + flatbuf.MessageAddVersion(b, int16(currentMetadataVersion)) + flatbuf.MessageAddHeaderType(b, hdrType) + flatbuf.MessageAddHeader(b, hdr) + flatbuf.MessageAddBodyLength(b, bodyLen) + msg := flatbuf.MessageEnd(b) + b.Finish(msg) + + return writeFBBuilder(b, mem) +} + +func writeSchemaMessage(schema *arrow.Schema, mem memory.Allocator, dict *dictMemo) *memory.Buffer { + b := flatbuffers.NewBuilder(1024) + schemaFB := schemaToFB(b, schema, dict) + return writeMessageFB(b, mem, flatbuf.MessageHeaderSchema, schemaFB, 0) +} + +func writeFileFooter(schema *arrow.Schema, dicts, recs []fileBlock, w io.Writer) error { + var ( + b = flatbuffers.NewBuilder(1024) + memo = newMemo() + ) + + schemaFB := schemaToFB(b, schema, &memo) + dictsFB := fileBlocksToFB(b, dicts, flatbuf.FooterStartDictionariesVector) + recsFB := fileBlocksToFB(b, recs, flatbuf.FooterStartRecordBatchesVector) + + flatbuf.FooterStart(b) + flatbuf.FooterAddVersion(b, int16(currentMetadataVersion)) + flatbuf.FooterAddSchema(b, schemaFB) + flatbuf.FooterAddDictionaries(b, dictsFB) + flatbuf.FooterAddRecordBatches(b, recsFB) + footer := flatbuf.FooterEnd(b) + + b.Finish(footer) + + _, err := w.Write(b.FinishedBytes()) + return err +} + +func writeRecordMessage(mem memory.Allocator, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata) *memory.Buffer { + b := flatbuffers.NewBuilder(0) + recFB := recordToFB(b, size, bodyLength, fields, meta) + return writeMessageFB(b, mem, flatbuf.MessageHeaderRecordBatch, recFB, bodyLength) +} + +func recordToFB(b *flatbuffers.Builder, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata) flatbuffers.UOffsetT { + fieldsFB := writeFieldNodes(b, fields, flatbuf.RecordBatchStartNodesVector) + metaFB := writeBuffers(b, meta, flatbuf.RecordBatchStartBuffersVector) + + flatbuf.RecordBatchStart(b) + flatbuf.RecordBatchAddLength(b, size) + flatbuf.RecordBatchAddNodes(b, fieldsFB) + flatbuf.RecordBatchAddBuffers(b, metaFB) + return flatbuf.RecordBatchEnd(b) +} + +func writeFieldNodes(b *flatbuffers.Builder, fields []fieldMetadata, start startVecFunc) flatbuffers.UOffsetT { + + start(b, len(fields)) + for i := len(fields) - 1; i >= 0; i-- { + field := fields[i] + if field.Offset != 0 { + panic(xerrors.Errorf("arrow/ipc: field metadata for IPC must have offset 0")) + } + flatbuf.CreateFieldNode(b, field.Len, field.Nulls) + } + + return b.EndVector(len(fields)) +} + +func writeBuffers(b *flatbuffers.Builder, buffers []bufferMetadata, start startVecFunc) flatbuffers.UOffsetT { + start(b, len(buffers)) + for i := len(buffers) - 1; i >= 0; i-- { + buffer := buffers[i] + flatbuf.CreateBuffer(b, buffer.Offset, buffer.Len) + } + return b.EndVector(len(buffers)) +} + +func writeMessage(msg *memory.Buffer, alignment int32, w io.Writer) (int, error) { + var ( + n int + err error + ) + + // ARROW-3212: we do not make any assumption on whether the output stream is aligned or not. 
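+ // e.g. (illustrative): a 122-byte flatbuffer with alignment=8 gives + // paddedMsgLen = 122+8 = 130, rounded up to 136; the size prefix written is + // 136-8 = 128, and 6 bytes of padding follow the flatbuffer.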
+ paddedMsgLen := int32(msg.Len()) + 8 + remainder := paddedMsgLen % alignment + if remainder != 0 { + paddedMsgLen += alignment - remainder + } + + tmp := make([]byte, 4) + + // write continuation indicator, to address 8-byte alignment requirement from FlatBuffers. + binary.LittleEndian.PutUint32(tmp, kIPCContToken) + _, err = w.Write(tmp) + if err != nil { + return 0, xerrors.Errorf("arrow/ipc: could not write continuation bit indicator: %w", err) + } + + // the returned message size includes the length prefix, the flatbuffer, + padding + n = int(paddedMsgLen) + + // write the flatbuffer size prefix, including padding + sizeFB := paddedMsgLen - 8 + binary.LittleEndian.PutUint32(tmp, uint32(sizeFB)) + _, err = w.Write(tmp) + if err != nil { + return n, xerrors.Errorf("arrow/ipc: could not write message flatbuffer size prefix: %w", err) + } + + // write the flatbuffer + _, err = w.Write(msg.Bytes()) + if err != nil { + return n, xerrors.Errorf("arrow/ipc: could not write message flatbuffer: %w", err) + } + + // write any padding + padding := paddedMsgLen - int32(msg.Len()) - 8 + if padding > 0 { + _, err = w.Write(paddingBytes[:padding]) + if err != nil { + return n, xerrors.Errorf("arrow/ipc: could not write message padding bytes: %w", err) + } + } + + return n, err +} diff --git a/vendor/github.com/apache/arrow/go/arrow/ipc/reader.go b/vendor/github.com/apache/arrow/go/arrow/ipc/reader.go new file mode 100644 index 000000000000..d32f13f818c6 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/ipc/reader.go @@ -0,0 +1,202 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc // import "github.com/apache/arrow/go/arrow/ipc" + +import ( + "bytes" + "io" + "sync/atomic" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/array" + "github.com/apache/arrow/go/arrow/internal/debug" + "github.com/apache/arrow/go/arrow/internal/flatbuf" + "github.com/apache/arrow/go/arrow/memory" + "golang.org/x/xerrors" +) + +// Reader reads records from an io.Reader. +// Reader expects a schema (plus any dictionaries) as the first messages +// in the stream, followed by records. +type Reader struct { + r *MessageReader + schema *arrow.Schema + + refCount int64 + rec array.Record + err error + + types dictTypeMap + memo dictMemo + + mem memory.Allocator + + done bool +} + +// NewReader returns a reader that reads records from an input stream. 
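+// +// A minimal usage sketch (illustrative only; error handling elided): +// +// rdr, _ := NewReader(f) +// defer rdr.Release() +// for rdr.Next() { +// rec := rdr.Record() // valid until the next call to Next +// _ = rec +// }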
+func NewReader(r io.Reader, opts ...Option) (*Reader, error) { + cfg := newConfig() + for _, opt := range opts { + opt(cfg) + } + + rr := &Reader{ + r: NewMessageReader(r), + types: make(dictTypeMap), + memo: newMemo(), + mem: cfg.alloc, + } + + err := rr.readSchema(cfg.schema) + if err != nil { + return nil, xerrors.Errorf("arrow/ipc: could not read schema from stream: %w", err) + } + + return rr, nil +} + +// Err returns the last error encountered during the iteration over the +// underlying stream. +func (r *Reader) Err() error { return r.err } + +func (r *Reader) Schema() *arrow.Schema { return r.schema } + +func (r *Reader) readSchema(schema *arrow.Schema) error { + msg, err := r.r.Message() + if err != nil { + return xerrors.Errorf("arrow/ipc: could not read message schema: %w", err) + } + + if msg.Type() != MessageSchema { + return xerrors.Errorf("arrow/ipc: invalid message type (got=%v, want=%v)", msg.Type(), MessageSchema) + } + + // FIXME(sbinet) refactor msg-header handling. + var schemaFB flatbuf.Schema + initFB(&schemaFB, msg.msg.Header) + + r.types, err = dictTypesFromFB(&schemaFB) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not read dictionary types from message schema: %w", err) + } + + // TODO(sbinet): in the future, we may want to reconcile IDs in the stream with + // those found in the schema. + for range r.types { + panic("not implemented") // FIXME(sbinet): ReadNextDictionary + } + + r.schema, err = schemaFromFB(&schemaFB, &r.memo) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not decode schema from message schema: %w", err) + } + + // check that the provided schema matches the one read from the stream. + if schema != nil && !schema.Equal(r.schema) { + return errInconsistentSchema + } + + return nil +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (r *Reader) Retain() { + atomic.AddInt64(&r.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (r *Reader) Release() { + debug.Assert(atomic.LoadInt64(&r.refCount) > 0, "too many releases") + + if atomic.AddInt64(&r.refCount, -1) == 0 { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + if r.r != nil { + r.r.Release() + r.r = nil + } + } +} + +// Next returns whether a Record could be extracted from the underlying stream. +func (r *Reader) Next() bool { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + + if r.err != nil || r.done { + return false + } + + return r.next() +} + +func (r *Reader) next() bool { + var msg *Message + msg, r.err = r.r.Message() + if r.err != nil { + r.done = true + if r.err == io.EOF { + r.err = nil + } + return false + } + + if got, want := msg.Type(), MessageRecordBatch; got != want { + r.err = xerrors.Errorf("arrow/ipc: invalid message type (got=%v, want=%v)", got, want) + return false + } + + r.rec = newRecord(r.schema, msg.meta, bytes.NewReader(msg.body.Bytes())) + return true +} + +// Record returns the current record that has been extracted from the +// underlying stream. +// It is valid until the next call to Next. +func (r *Reader) Record() array.Record { + return r.rec +} + +// Read reads the current record from the underlying stream and returns it, together with an error, if any. +// When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF).
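+// The returned record is released, and thus invalidated, by the next call +// to Read or Next.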
+func (r *Reader) Read() (array.Record, error) { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + + if !r.next() { + if r.done { + return nil, io.EOF + } + return nil, r.err + } + + return r.rec, nil +} + +var ( + _ array.RecordReader = (*Reader)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/ipc/writer.go b/vendor/github.com/apache/arrow/go/arrow/ipc/writer.go new file mode 100644 index 000000000000..f9e0e274855a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/ipc/writer.go @@ -0,0 +1,458 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc // import "github.com/apache/arrow/go/arrow/ipc" + +import ( + "io" + "math" + + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/array" + "github.com/apache/arrow/go/arrow/bitutil" + "github.com/apache/arrow/go/arrow/memory" + "golang.org/x/xerrors" +) + +type swriter struct { + w io.Writer + pos int64 +} + +func (w *swriter) start() error { return nil } +func (w *swriter) Close() error { + _, err := w.Write(kEOS[:]) + return err +} + +func (w *swriter) write(p payload) error { + _, err := writeIPCPayload(w, p) + if err != nil { + return err + } + return nil +} + +func (w *swriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.pos += int64(n) + return n, err +} + +// Writer is an Arrow stream writer. +type Writer struct { + w io.Writer + + mem memory.Allocator + pw payloadWriter + + started bool + schema *arrow.Schema +} + +// NewWriter returns a writer that writes records to the provided output stream. +func NewWriter(w io.Writer, opts ...Option) *Writer { + cfg := newConfig(opts...) 
+ return &Writer{ + w: w, + mem: cfg.alloc, + pw: &swriter{w: w}, + schema: cfg.schema, + } +} + +func (w *Writer) Close() error { + if !w.started { + err := w.start() + if err != nil { + return err + } + } + + if w.pw == nil { + return nil + } + + err := w.pw.Close() + if err != nil { + return xerrors.Errorf("arrow/ipc: could not close payload writer: %w", err) + } + w.pw = nil + + return nil +} + +func (w *Writer) Write(rec array.Record) error { + if !w.started { + err := w.start() + if err != nil { + return err + } + } + + schema := rec.Schema() + if schema == nil || !schema.Equal(w.schema) { + return errInconsistentSchema + } + + const allow64b = true + var ( + data = payload{msg: MessageRecordBatch} + enc = newRecordEncoder(w.mem, 0, kMaxNestingDepth, allow64b) + ) + defer data.Release() + + if err := enc.Encode(&data, rec); err != nil { + return xerrors.Errorf("arrow/ipc: could not encode record to payload: %w", err) + } + + return w.pw.write(data) +} + +func (w *Writer) start() error { + w.started = true + + // write out schema payloads + ps := payloadsFromSchema(w.schema, w.mem, nil) + defer ps.Release() + + for _, data := range ps { + err := w.pw.write(data) + if err != nil { + return err + } + } + + return nil +} + +type recordEncoder struct { + mem memory.Allocator + + fields []fieldMetadata + meta []bufferMetadata + + depth int64 + start int64 + allow64b bool +} + +func newRecordEncoder(mem memory.Allocator, startOffset, maxDepth int64, allow64b bool) *recordEncoder { + return &recordEncoder{ + mem: mem, + start: startOffset, + depth: maxDepth, + allow64b: allow64b, + } +} + +func (w *recordEncoder) Encode(p *payload, rec array.Record) error { + + // perform depth-first traversal of the row-batch + for i, col := range rec.Columns() { + err := w.visit(p, col) + if err != nil { + return xerrors.Errorf("arrow/ipc: could not encode column %d (%q): %w", i, rec.ColumnName(i), err) + } + } + + // position for the start of a buffer relative to the passed frame of reference. + // may be 0 or some other position in an address space. + offset := w.start + w.meta = make([]bufferMetadata, len(p.body)) + + // construct the metadata for the record batch header + for i, buf := range p.body { + var ( + size int64 + padding int64 + ) + // the buffer might be null if we are handling zero row lengths. + if buf != nil { + size = int64(buf.Len()) + padding = bitutil.CeilByte64(size) - size + } + w.meta[i] = bufferMetadata{ + Offset: offset, + Len: size + padding, + } + offset += size + padding + } + + p.size = offset - w.start + if !bitutil.IsMultipleOf8(p.size) { + panic("not aligned") + } + + return w.encodeMetadata(p, rec.NumRows()) +} + +func (w *recordEncoder) visit(p *payload, arr array.Interface) error { + if w.depth <= 0 { + return errMaxRecursion + } + + if !w.allow64b && arr.Len() > math.MaxInt32 { + return errBigArray + } + + // add all common elements + w.fields = append(w.fields, fieldMetadata{ + Len: int64(arr.Len()), + Nulls: int64(arr.NullN()), + Offset: 0, + }) + + switch arr.NullN() { + case 0: + p.body = append(p.body, nil) + default: + switch arr.DataType().ID() { + case arrow.NULL: + // Null type has no validity bitmap + default: + data := arr.Data() + bitmap := newTruncatedBitmap(w.mem, int64(data.Offset()), int64(data.Len()), data.Buffers()[0]) + p.body = append(p.body, bitmap) + } + } + + switch dtype := arr.DataType().(type) { + case *arrow.NullType: + // ok. NullArrays are completely empty. 
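+ // (a NullArray contributes no bytes to the message body: the validity-bitmap + // step above skipped arrow.NULL, and no data buffer is emitted for it.)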
+ + case *arrow.BooleanType: + var ( + data = arr.Data() + bitm *memory.Buffer + ) + + if data.Len() != 0 { + bitm = newTruncatedBitmap(w.mem, int64(data.Offset()), int64(data.Len()), data.Buffers()[1]) + } + p.body = append(p.body, bitm) + + case arrow.FixedWidthDataType: + data := arr.Data() + values := data.Buffers()[1] + arrLen := int64(arr.Len()) + typeWidth := int64(dtype.BitWidth() / 8) + minLength := paddedLength(arrLen*typeWidth, kArrowAlignment) + + switch { + case needTruncate(int64(data.Offset()), values, minLength): + // non-zero offset: slice the buffer + offset := int64(data.Offset()) * typeWidth + // send padding if available + len := minI64(bitutil.CeilByte64(arrLen*typeWidth), int64(data.Len())-offset) + data = array.NewSliceData(data, offset, offset+len) + defer data.Release() + values = data.Buffers()[1] + } + if values != nil { + values.Retain() + } + p.body = append(p.body, values) + + case *arrow.BinaryType: + arr := arr.(*array.Binary) + voffsets, err := w.getZeroBasedValueOffsets(arr) + if err != nil { + return xerrors.Errorf("could not retrieve zero-based value offsets from %T: %w", arr, err) + } + data := arr.Data() + values := data.Buffers()[2] + + var totalDataBytes int64 + if voffsets != nil { + totalDataBytes = int64(len(arr.ValueBytes())) + } + + switch { + case needTruncate(int64(data.Offset()), values, totalDataBytes): + // slice data buffer to include the range we need now. + var ( + beg = int64(arr.ValueOffset(0)) + len = minI64(paddedLength(totalDataBytes, kArrowAlignment), int64(data.Len())-beg) + ) + data = array.NewSliceData(data, beg, beg+len) + defer data.Release() + values = data.Buffers()[2] + default: + if values != nil { + values.Retain() + } + } + p.body = append(p.body, voffsets) + p.body = append(p.body, values) + + case *arrow.StringType: + arr := arr.(*array.String) + voffsets, err := w.getZeroBasedValueOffsets(arr) + if err != nil { + return xerrors.Errorf("could not retrieve zero-based value offsets from %T: %w", arr, err) + } + data := arr.Data() + values := data.Buffers()[2] + + var totalDataBytes int64 + if voffsets != nil { + totalDataBytes = int64(arr.ValueOffset(arr.Len()) - arr.ValueOffset(0)) + } + + switch { + case needTruncate(int64(data.Offset()), values, totalDataBytes): + // slice data buffer to include the range we need now. 
+ var ( + beg = int64(arr.ValueOffset(0)) + len = minI64(paddedLength(totalDataBytes, kArrowAlignment), int64(data.Len())-beg) + ) + data = array.NewSliceData(data, beg, beg+len) + defer data.Release() + values = data.Buffers()[2] + default: + if values != nil { + values.Retain() + } + } + p.body = append(p.body, voffsets) + p.body = append(p.body, values) + + case *arrow.StructType: + w.depth-- + arr := arr.(*array.Struct) + for i := 0; i < arr.NumField(); i++ { + err := w.visit(p, arr.Field(i)) + if err != nil { + return xerrors.Errorf("could not visit field %d of struct-array: %w", i, err) + } + } + w.depth++ + + case *arrow.ListType: + arr := arr.(*array.List) + voffsets, err := w.getZeroBasedValueOffsets(arr) + if err != nil { + return xerrors.Errorf("could not retrieve zero-based value offsets for array %T: %w", arr, err) + } + p.body = append(p.body, voffsets) + + w.depth-- + var ( + values = arr.ListValues() + mustRelease = false + values_offset int64 + values_length int64 + ) + defer func() { + if mustRelease { + values.Release() + } + }() + + if voffsets != nil { + values_offset = int64(arr.Offsets()[0]) + values_length = int64(arr.Offsets()[arr.Len()]) - values_offset + } + + if len(arr.Offsets()) != 0 || values_length < int64(values.Len()) { + // must also slice the values + values = array.NewSlice(values, values_offset, values_length) + mustRelease = true + } + err = w.visit(p, values) + + if err != nil { + return xerrors.Errorf("could not visit list element for array %T: %w", arr, err) + } + w.depth++ + + case *arrow.FixedSizeListType: + arr := arr.(*array.FixedSizeList) + + w.depth-- + + size := int64(arr.DataType().(*arrow.FixedSizeListType).Len()) + beg := int64(arr.Offset()) * size + end := int64(arr.Offset()+arr.Len()) * size + + values := array.NewSlice(arr.ListValues(), beg, end) + defer values.Release() + + err := w.visit(p, values) + + if err != nil { + return xerrors.Errorf("could not visit list element for array %T: %w", arr, err) + } + w.depth++ + + default: + panic(xerrors.Errorf("arrow/ipc: unknown array %T (dtype=%T)", arr, dtype)) + } + + return nil +} + +func (w *recordEncoder) getZeroBasedValueOffsets(arr array.Interface) (*memory.Buffer, error) { + data := arr.Data() + voffsets := data.Buffers()[1] + if data.Offset() != 0 { + // FIXME(sbinet): writer.cc:231 + panic(xerrors.Errorf("not implemented offset=%d", data.Offset())) + } + if voffsets == nil || voffsets.Len() == 0 { + return nil, nil + } + + voffsets.Retain() + return voffsets, nil +} + +func (w *recordEncoder) encodeMetadata(p *payload, nrows int64) error { + p.meta = writeRecordMessage(w.mem, nrows, p.size, w.fields, w.meta) + return nil +} + +func newTruncatedBitmap(mem memory.Allocator, offset, length int64, input *memory.Buffer) *memory.Buffer { + if input == nil { + return nil + } + + minLength := paddedLength(bitutil.BytesForBits(length), kArrowAlignment) + switch { + case offset != 0 || minLength < int64(input.Len()): + // with a sliced array / non-zero offset, we must copy the bitmap + panic("not implemented") // FIXME(sbinet): writer.cc:75 + default: + input.Retain() + return input + } +} + +func needTruncate(offset int64, buf *memory.Buffer, minLength int64) bool { + if buf == nil { + return false + } + return offset != 0 || minLength < int64(buf.Len()) +} + +func minI64(a, b int64) int64 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/Makefile b/vendor/github.com/apache/arrow/go/arrow/memory/Makefile new file mode 
100644 index 000000000000..e50d94e60312 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/Makefile @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +GO_BUILD=go build +GO_GEN=go generate +GO_TEST=go test +GOPATH=$(realpath ../../../..) + +# this converts rotate instructions from "ro[lr] <reg>" -> "ro[lr] <reg>, 1" for yasm compatibility +PERL_FIXUP_ROTATE=perl -i -pe 's/(ro[rl]\s+\w{2,3})$$/\1, 1/' + +C2GOASM=c2goasm -a -f +CC=clang +C_FLAGS=-target x86_64-unknown-none -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 -fno-asynchronous-unwind-tables \ + -fno-exceptions -fno-rtti -O3 -fno-builtin -ffast-math -fno-jump-tables -I_lib +ASM_FLAGS_AVX2=-mavx2 -mfma -mllvm -force-vector-width=32 +ASM_FLAGS_SSE3=-msse3 +ASM_FLAGS_SSE4=-msse4 + +GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go') +ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go') + +INTEL_SOURCES := \ + memory_avx2_amd64.s memory_sse4_amd64.s + +.PHONY: assembly + +assembly: $(INTEL_SOURCES) + +_lib/memory_avx2.s: _lib/memory.c + $(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@ + +_lib/memory_sse4.s: _lib/memory.c + $(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@ + +memory_avx2_amd64.s: _lib/memory_avx2.s + $(C2GOASM) -a -f $^ $@ + +memory_sse4_amd64.s: _lib/memory_sse4.s + $(C2GOASM) -a -f $^ $@ + diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/allocator.go b/vendor/github.com/apache/arrow/go/arrow/memory/allocator.go new file mode 100644 index 000000000000..da6be44e37a8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/allocator.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package memory + +const ( + alignment = 64 +) + +type Allocator interface { + Allocate(size int) []byte + Reallocate(size int, b []byte) []byte + Free(b []byte) +} + +// DefaultAllocator is a default implementation of Allocator and can be used anywhere +// an Allocator is required. +// +// DefaultAllocator is safe to use from multiple goroutines. +var DefaultAllocator Allocator = NewGoAllocator() diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/buffer.go b/vendor/github.com/apache/arrow/go/arrow/memory/buffer.go new file mode 100644 index 000000000000..57c0db487d18 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/buffer.go @@ -0,0 +1,125 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/arrow/internal/debug" +) + +// Buffer is a wrapper type for a buffer of bytes. +type Buffer struct { + refCount int64 + buf []byte + length int + mutable bool + mem Allocator +} + +// NewBufferBytes creates a fixed-size buffer from the specified data. +func NewBufferBytes(data []byte) *Buffer { + return &Buffer{refCount: 0, buf: data, length: len(data)} +} + +// NewResizableBuffer creates a mutable, resizable buffer with an Allocator for managing memory. +func NewResizableBuffer(mem Allocator) *Buffer { + return &Buffer{refCount: 1, mutable: true, mem: mem} +} + +// Retain increases the reference count by 1. +func (b *Buffer) Retain() { + if b.mem != nil { + atomic.AddInt64(&b.refCount, 1) + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Buffer) Release() { + if b.mem != nil { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + b.mem.Free(b.buf) + b.buf, b.length = nil, 0 + } + } +} + +// Reset resets the buffer for reuse. +func (b *Buffer) Reset(buf []byte) { + b.buf = buf + b.length = len(buf) +} + +// Buf returns the slice of memory allocated by the Buffer, which is adjusted by calling Reserve. +func (b *Buffer) Buf() []byte { return b.buf } + +// Bytes returns a slice of size Len, which is adjusted by calling Resize. +func (b *Buffer) Bytes() []byte { return b.buf[:b.length] } + +// Mutable returns a bool indicating whether the buffer is mutable or not. +func (b *Buffer) Mutable() bool { return b.mutable } + +// Len returns the length of the buffer. +func (b *Buffer) Len() int { return b.length } + +// Cap returns the capacity of the buffer. +func (b *Buffer) Cap() int { return len(b.buf) } + +// Reserve reserves the provided amount of capacity for the buffer. 
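+// The capacity is rounded up to the next multiple of 64 bytes.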
+func (b *Buffer) Reserve(capacity int) { + if capacity > len(b.buf) { + newCap := roundUpToMultipleOf64(capacity) + if len(b.buf) == 0 { + b.buf = b.mem.Allocate(newCap) + } else { + b.buf = b.mem.Reallocate(newCap, b.buf) + } + } +} + +// Resize resizes the buffer to the target size. +func (b *Buffer) Resize(newSize int) { + b.resize(newSize, true) +} + +// ResizeNoShrink resizes the buffer to the target size, but will not +// shrink it. +func (b *Buffer) ResizeNoShrink(newSize int) { + b.resize(newSize, false) +} + +func (b *Buffer) resize(newSize int, shrink bool) { + if !shrink || newSize > b.length { + b.Reserve(newSize) + } else { + // Buffer is not growing, so shrink to the requested size without + // excess space. + newCap := roundUpToMultipleOf64(newSize) + if len(b.buf) != newCap { + if newSize == 0 { + b.mem.Free(b.buf) + b.buf = nil + } else { + b.buf = b.mem.Reallocate(newCap, b.buf) + } + } + } + b.length = newSize +} diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/checked_allocator.go b/vendor/github.com/apache/arrow/go/arrow/memory/checked_allocator.go new file mode 100644 index 000000000000..24dc8dfd2491 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/checked_allocator.go @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package memory + +type CheckedAllocator struct { + mem Allocator + base int + sz int +} + +func NewCheckedAllocator(mem Allocator) *CheckedAllocator { + return &CheckedAllocator{mem: mem} +} + +func (a *CheckedAllocator) Allocate(size int) []byte { + a.sz += size + return a.mem.Allocate(size) +} + +func (a *CheckedAllocator) Reallocate(size int, b []byte) []byte { + a.sz += size - len(b) + return a.mem.Reallocate(size, b) +} + +func (a *CheckedAllocator) Free(b []byte) { + a.sz -= len(b) + a.mem.Free(b) +} + +type TestingT interface { + Errorf(format string, args ...interface{}) + Helper() +} + +func (a *CheckedAllocator) AssertSize(t TestingT, sz int) { + if a.sz != sz { + t.Helper() + t.Errorf("invalid memory size exp=%d, got=%d", sz, a.sz) + } +} + +type CheckedAllocatorScope struct { + alloc *CheckedAllocator + sz int +} + +func NewCheckedAllocatorScope(alloc *CheckedAllocator) *CheckedAllocatorScope { + return &CheckedAllocatorScope{alloc: alloc, sz: alloc.sz} +} + +func (c *CheckedAllocatorScope) CheckSize(t TestingT) { + if c.sz != c.alloc.sz { + t.Helper() + t.Errorf("invalid memory size exp=%d, got=%d", c.sz, c.alloc.sz) + } +} + +var ( + _ Allocator = (*CheckedAllocator)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/doc.go b/vendor/github.com/apache/arrow/go/arrow/memory/doc.go new file mode 100644 index 000000000000..959f88b4f222 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/doc.go @@ -0,0 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package memory provides support for allocating and manipulating memory at a low level. +*/ +package memory diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/go_allocator.go b/vendor/github.com/apache/arrow/go/arrow/memory/go_allocator.go new file mode 100644 index 000000000000..1dea4a8d2338 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/go_allocator.go @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package memory + +type GoAllocator struct{} + +func NewGoAllocator() *GoAllocator { return &GoAllocator{} } + +func (a *GoAllocator) Allocate(size int) []byte { + buf := make([]byte, size+alignment) // padding for 64-byte alignment + addr := int(addressOf(buf)) + next := roundUpToMultipleOf64(addr) + if addr != next { + shift := next - addr + return buf[shift : size+shift : size+shift] + } + return buf[:size:size] +} + +func (a *GoAllocator) Reallocate(size int, b []byte) []byte { + if size == len(b) { + return b + } + + newBuf := a.Allocate(size) + copy(newBuf, b) + return newBuf +} + +func (a *GoAllocator) Free(b []byte) {} + +var ( + _ Allocator = (*GoAllocator)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/memory.go b/vendor/github.com/apache/arrow/go/arrow/memory/memory.go new file mode 100644 index 000000000000..43627f5ed18b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/memory.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +var ( + memset func(b []byte, c byte) = memory_memset_go +) + +// Set assigns the value c to every element of the slice buf. +func Set(buf []byte, c byte) { + memset(buf, c) +} + +// memory_memset_go reference implementation +func memory_memset_go(buf []byte, c byte) { + for i := 0; i < len(buf); i++ { + buf[i] = c + } +} diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/memory_amd64.go b/vendor/github.com/apache/arrow/go/arrow/memory/memory_amd64.go new file mode 100644 index 000000000000..5ea4a3fe1a04 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/memory_amd64.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !noasm + +package memory + +import ( + "github.com/apache/arrow/go/arrow/internal/cpu" +) + +func init() { + if cpu.X86.HasAVX2 { + memset = memory_memset_avx2 + } else if cpu.X86.HasSSE42 { + memset = memory_memset_sse4 + } else { + memset = memory_memset_go + } +} diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/memory_avx2_amd64.go b/vendor/github.com/apache/arrow/go/arrow/memory/memory_avx2_amd64.go new file mode 100644 index 000000000000..2bd851ea5327 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/memory_avx2_amd64.go @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !noasm + +package memory + +import "unsafe" + +//go:noescape +func _memset_avx2(buf unsafe.Pointer, len, c uintptr) + +func memory_memset_avx2(buf []byte, c byte) { + if len(buf) == 0 { + return + } + + var ( + p1 = unsafe.Pointer(&buf[0]) + p2 = uintptr(len(buf)) + p3 = uintptr(c) + ) + if len(buf) > 2000 || isMultipleOfPowerOf2(len(buf), 256) { + _memset_avx2(p1, p2, p3) + } else { + _memset_sse4(p1, p2, p3) + } +} diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/memory_avx2_amd64.s b/vendor/github.com/apache/arrow/go/arrow/memory/memory_avx2_amd64.s new file mode 100644 index 000000000000..2a77807cb27c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/memory_avx2_amd64.s @@ -0,0 +1,85 @@ +//+build !noasm !appengine +// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT + +TEXT ·_memset_avx2(SB), $0-24 + + MOVQ buf+0(FP), DI + MOVQ len+8(FP), SI + MOVQ c+16(FP), DX + + LONG $0x371c8d4c // lea r11, [rdi + rsi] + WORD $0x3949; BYTE $0xfb // cmp r11, rdi + JBE LBB0_13 + LONG $0x80fe8148; WORD $0x0000; BYTE $0x00 // cmp rsi, 128 + JB LBB0_12 + WORD $0x8949; BYTE $0xf0 // mov r8, rsi + LONG $0x80e08349 // and r8, -128 + WORD $0x8949; BYTE $0xf2 // mov r10, rsi + LONG $0x80e28349 // and r10, -128 + JE LBB0_12 + LONG $0xc26ef9c5 // vmovd xmm0, edx + LONG $0x787de2c4; BYTE $0xc0 // vpbroadcastb ymm0, xmm0 + LONG $0x804a8d4d // lea r9, [r10 - 128] + WORD $0x8944; BYTE $0xc8 // mov eax, r9d + WORD $0xe8c1; BYTE $0x07 // shr eax, 7 + WORD $0xc0ff // inc eax + LONG $0x03e08348 // and rax, 3 + JE LBB0_4 + WORD $0xf748; BYTE $0xd8 // neg rax + WORD $0xc931 // xor ecx, ecx + +LBB0_6: + LONG $0x047ffec5; BYTE $0x0f // vmovdqu yword [rdi + rcx], ymm0 + LONG $0x447ffec5; WORD $0x200f // vmovdqu yword [rdi + rcx + 32], ymm0 + LONG $0x447ffec5; WORD $0x400f // vmovdqu yword [rdi + rcx + 64], ymm0 + LONG $0x447ffec5; WORD $0x600f // vmovdqu yword [rdi + rcx + 96], ymm0 + LONG $0x80e98348 // sub rcx, -128 + WORD $0xff48; BYTE $0xc0 // inc rax + JNE LBB0_6 + JMP LBB0_7 + +LBB0_4: + WORD $0xc931 // xor ecx, ecx + +LBB0_7: + LONG $0x80f98149; WORD $0x0001; BYTE $0x00 // cmp r9, 384 + JB LBB0_10 + WORD $0x894c; 
BYTE $0xd0 // mov rax, r10 + WORD $0x2948; BYTE $0xc8 // sub rax, rcx + QUAD $0x000001e00f8c8d48 // lea rcx, [rdi + rcx + 480] + +LBB0_9: + QUAD $0xfffffe20817ffec5 // vmovdqu yword [rcx - 480], ymm0 + QUAD $0xfffffe40817ffec5 // vmovdqu yword [rcx - 448], ymm0 + QUAD $0xfffffe60817ffec5 // vmovdqu yword [rcx - 416], ymm0 + QUAD $0xfffffe80817ffec5 // vmovdqu yword [rcx - 384], ymm0 + QUAD $0xfffffea0817ffec5 // vmovdqu yword [rcx - 352], ymm0 + QUAD $0xfffffec0817ffec5 // vmovdqu yword [rcx - 320], ymm0 + QUAD $0xfffffee0817ffec5 // vmovdqu yword [rcx - 288], ymm0 + QUAD $0xffffff00817ffec5 // vmovdqu yword [rcx - 256], ymm0 + QUAD $0xffffff20817ffec5 // vmovdqu yword [rcx - 224], ymm0 + QUAD $0xffffff40817ffec5 // vmovdqu yword [rcx - 192], ymm0 + QUAD $0xffffff60817ffec5 // vmovdqu yword [rcx - 160], ymm0 + LONG $0x417ffec5; BYTE $0x80 // vmovdqu yword [rcx - 128], ymm0 + LONG $0x417ffec5; BYTE $0xa0 // vmovdqu yword [rcx - 96], ymm0 + LONG $0x417ffec5; BYTE $0xc0 // vmovdqu yword [rcx - 64], ymm0 + LONG $0x417ffec5; BYTE $0xe0 // vmovdqu yword [rcx - 32], ymm0 + LONG $0x017ffec5 // vmovdqu yword [rcx], ymm0 + LONG $0x00c18148; WORD $0x0002; BYTE $0x00 // add rcx, 512 + LONG $0xfe000548; WORD $0xffff // add rax, -512 + JNE LBB0_9 + +LBB0_10: + WORD $0x3949; BYTE $0xf2 // cmp r10, rsi + JE LBB0_13 + WORD $0x014c; BYTE $0xc7 // add rdi, r8 + +LBB0_12: + WORD $0x1788 // mov byte [rdi], dl + WORD $0xff48; BYTE $0xc7 // inc rdi + WORD $0x3949; BYTE $0xfb // cmp r11, rdi + JNE LBB0_12 + +LBB0_13: + VZEROUPPER + RET diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/memory_js_wasm.go b/vendor/github.com/apache/arrow/go/arrow/memory/memory_js_wasm.go new file mode 100644 index 000000000000..9b94d99ff33c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/memory_js_wasm.go @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build wasm + +package memory + +func init() { + memset = memory_memset_go +} diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/memory_noasm.go b/vendor/github.com/apache/arrow/go/arrow/memory/memory_noasm.go new file mode 100644 index 000000000000..bf8846fa2e05 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/memory_noasm.go @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build noasm + +package memory + +func init() { + memset = memory_memset_go +} diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/memory_sse4_amd64.go b/vendor/github.com/apache/arrow/go/arrow/memory/memory_sse4_amd64.go new file mode 100644 index 000000000000..716c0d2704a8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/memory_sse4_amd64.go @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !noasm + +package memory + +import "unsafe" + +//go:noescape +func _memset_sse4(buf unsafe.Pointer, len, c uintptr) + +func memory_memset_sse4(buf []byte, c byte) { + if len(buf) == 0 { + return + } + _memset_sse4(unsafe.Pointer(&buf[0]), uintptr(len(buf)), uintptr(c)) +} diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/memory_sse4_amd64.s b/vendor/github.com/apache/arrow/go/arrow/memory/memory_sse4_amd64.s new file mode 100644 index 000000000000..b1906f99b716 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/memory_sse4_amd64.s @@ -0,0 +1,84 @@ +//+build !noasm !appengine +// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT + +TEXT ·_memset_sse4(SB), $0-24 + + MOVQ buf+0(FP), DI + MOVQ len+8(FP), SI + MOVQ c+16(FP), DX + + LONG $0x371c8d4c // lea r11, [rdi + rsi] + WORD $0x3949; BYTE $0xfb // cmp r11, rdi + JBE LBB0_13 + LONG $0x20fe8348 // cmp rsi, 32 + JB LBB0_12 + WORD $0x8949; BYTE $0xf0 // mov r8, rsi + LONG $0xe0e08349 // and r8, -32 + WORD $0x8949; BYTE $0xf2 // mov r10, rsi + LONG $0xe0e28349 // and r10, -32 + JE LBB0_12 + WORD $0xb60f; BYTE $0xc2 // movzx eax, dl + LONG $0xc06e0f66 // movd xmm0, eax + LONG $0xc9ef0f66 // pxor xmm1, xmm1 + LONG $0x00380f66; BYTE $0xc1 // pshufb xmm0, xmm1 + LONG $0xe04a8d4d // lea r9, [r10 - 32] + WORD $0x8944; BYTE $0xc9 // mov ecx, r9d + WORD $0xe9c1; BYTE $0x05 // shr ecx, 5 + WORD $0xc1ff // inc ecx + LONG $0x07e18348 // and rcx, 7 + JE LBB0_4 + WORD $0xf748; BYTE $0xd9 // neg rcx + WORD $0xc031 // xor eax, eax + +LBB0_6: + LONG $0x047f0ff3; BYTE $0x07 // movdqu oword [rdi + rax], xmm0 + LONG $0x447f0ff3; WORD $0x1007 // movdqu oword [rdi + rax + 16], xmm0 + LONG $0x20c08348 // add rax, 32 + WORD $0xff48; BYTE $0xc1 // inc rcx + JNE LBB0_6 + JMP LBB0_7 + +LBB0_4: + WORD $0xc031 // xor eax, eax + +LBB0_7: + LONG $0xe0f98149; WORD $0x0000; BYTE $0x00 // cmp r9, 224 + JB LBB0_10 + WORD $0x894c; BYTE $0xd1 // mov rcx, r10 + 
WORD $0x2948; BYTE $0xc1 // sub rcx, rax + QUAD $0x000000f007848d48 // lea rax, [rdi + rax + 240] + +LBB0_9: + QUAD $0xffffff10807f0ff3 // movdqu oword [rax - 240], xmm0 + QUAD $0xffffff20807f0ff3 // movdqu oword [rax - 224], xmm0 + QUAD $0xffffff30807f0ff3 // movdqu oword [rax - 208], xmm0 + QUAD $0xffffff40807f0ff3 // movdqu oword [rax - 192], xmm0 + QUAD $0xffffff50807f0ff3 // movdqu oword [rax - 176], xmm0 + QUAD $0xffffff60807f0ff3 // movdqu oword [rax - 160], xmm0 + QUAD $0xffffff70807f0ff3 // movdqu oword [rax - 144], xmm0 + LONG $0x407f0ff3; BYTE $0x80 // movdqu oword [rax - 128], xmm0 + LONG $0x407f0ff3; BYTE $0x90 // movdqu oword [rax - 112], xmm0 + LONG $0x407f0ff3; BYTE $0xa0 // movdqu oword [rax - 96], xmm0 + LONG $0x407f0ff3; BYTE $0xb0 // movdqu oword [rax - 80], xmm0 + LONG $0x407f0ff3; BYTE $0xc0 // movdqu oword [rax - 64], xmm0 + LONG $0x407f0ff3; BYTE $0xd0 // movdqu oword [rax - 48], xmm0 + LONG $0x407f0ff3; BYTE $0xe0 // movdqu oword [rax - 32], xmm0 + LONG $0x407f0ff3; BYTE $0xf0 // movdqu oword [rax - 16], xmm0 + LONG $0x007f0ff3 // movdqu oword [rax], xmm0 + LONG $0x01000548; WORD $0x0000 // add rax, 256 + LONG $0x00c18148; WORD $0xffff; BYTE $0xff // add rcx, -256 + JNE LBB0_9 + +LBB0_10: + WORD $0x3949; BYTE $0xf2 // cmp r10, rsi + JE LBB0_13 + WORD $0x014c; BYTE $0xc7 // add rdi, r8 + +LBB0_12: + WORD $0x1788 // mov byte [rdi], dl + WORD $0xff48; BYTE $0xc7 // inc rdi + WORD $0x3949; BYTE $0xfb // cmp r11, rdi + JNE LBB0_12 + +LBB0_13: + RET diff --git a/vendor/github.com/apache/arrow/go/arrow/memory/util.go b/vendor/github.com/apache/arrow/go/arrow/memory/util.go new file mode 100644 index 000000000000..3b0d3a5cb9ef --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/memory/util.go @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
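All of the memset variants above (AVX2, SSE4, and the pure-Go fallback) sit behind the package-level `memset` function value, selected once in `init` based on CPU features or the `noasm`/`wasm` build tags. Callers only ever see `memory.Set`; a small sketch:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow/memory"
)

func main() {
	buf := make([]byte, 8)
	memory.Set(buf, 0xAB) // dispatches to whichever memset was picked at init
	fmt.Println(buf)      // [171 171 171 171 171 171 171 171]
}
```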
+ +package memory + +import "unsafe" + +func roundToPowerOf2(v, round int) int { + forceCarry := round - 1 + truncateMask := ^forceCarry + return (v + forceCarry) & truncateMask +} + +func roundUpToMultipleOf64(v int) int { + return roundToPowerOf2(v, 64) +} + +func isMultipleOfPowerOf2(v int, d int) bool { + return (v & (d - 1)) == 0 +} + +func addressOf(b []byte) uintptr { + return uintptr(unsafe.Pointer(&b[0])) +} diff --git a/vendor/github.com/apache/arrow/go/arrow/numeric.schema.json b/vendor/github.com/apache/arrow/go/arrow/numeric.schema.json new file mode 100644 index 000000000000..7fa2800a57a3 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/numeric.schema.json @@ -0,0 +1,15 @@ +{ + "title": "templates", + "type": "array", + "items": { + "title": "template", + "type": "object", + "properties": { + "Name": { + "type": "string", + "description": "The name of the template type" + } + }, + "required": ["Name"] + } +} \ No newline at end of file diff --git a/vendor/github.com/apache/arrow/go/arrow/numeric.tmpldata b/vendor/github.com/apache/arrow/go/arrow/numeric.tmpldata new file mode 100644 index 000000000000..127a5a107e22 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/numeric.tmpldata @@ -0,0 +1,141 @@ +[ + { + "Name": "Int64", + "name": "int64", + "Type": "int64", + "Default": "0", + "Size": "8" + }, + { + "Name": "Uint64", + "name": "uint64", + "Type": "uint64", + "Default": "0", + "Size": "8" + }, + { + "Name": "Float64", + "name": "float64", + "Type": "float64", + "Default": "0", + "Size": "8" + }, + { + "Name": "Int32", + "name": "int32", + "Type": "int32", + "Default": "0", + "Size": "4", + "Opt": { + "BufferBuilder": true + } + }, + { + "Name": "Uint32", + "name": "uint32", + "Type": "uint32", + "Default": "0", + "Size": "4" + }, + { + "Name": "Float32", + "name": "float32", + "Type": "float32", + "Default": "0", + "Size": "4" + }, + { + "Name": "Int16", + "name": "int16", + "Type": "int16", + "Default": "0", + "Size": "2" + }, + { + "Name": "Uint16", + "name": "uint16", + "Type": "uint16", + "Default": "0", + "Size": "2" + }, + { + "Name": "Int8", + "name": "int8", + "Type": "int8", + "Default": "0", + "Size": "1" + }, + { + "Name": "Uint8", + "name": "uint8", + "Type": "uint8", + "Default": "0", + "Size": "1" + }, + { + "Name": "Timestamp", + "name": "timestamp", + "Type": "Timestamp", + "QualifiedType": "arrow.Timestamp", + "InternalType": "int64", + "Default": "0", + "Size": "8", + "Opt": { + "Parametric": true + } + }, + { + "Name": "Time32", + "name": "time32", + "Type": "Time32", + "QualifiedType": "arrow.Time32", + "InternalType": "int32", + "Default": "0", + "Size": "4", + "Opt": { + "Parametric": true + } + }, + { + "Name": "Time64", + "name": "time64", + "Type": "Time64", + "QualifiedType": "arrow.Time64", + "InternalType": "int64", + "Default": "0", + "Size": "8", + "Opt": { + "Parametric": true + } + }, + { + "Name": "Date32", + "name": "date32", + "Type": "Date32", + "QualifiedType": "arrow.Date32", + "InternalType": "int32", + "Default": "0", + "Size": "4" + }, + { + "Name": "Date64", + "name": "date64", + "Type": "Date64", + "QualifiedType": "arrow.Date64", + "InternalType": "int64", + "Default": "0", + "Size": "8" + }, + { + "Name": "Duration", + "name": "duration", + "Type": "Duration", + "QualifiedType": "arrow.Duration", + "InternalType": "int64", + "Default": "0", + "Size": "8", + "Opt": { + "Parametric": true + } + } +] diff --git a/vendor/github.com/apache/arrow/go/arrow/schema.go 
b/vendor/github.com/apache/arrow/go/arrow/schema.go new file mode 100644 index 000000000000..ab9e536e64ce --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/schema.go @@ -0,0 +1,193 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "fmt" + "sort" + "strings" +) + +type Metadata struct { + keys []string + values []string +} + +func NewMetadata(keys, values []string) Metadata { + if len(keys) != len(values) { + panic("arrow: len mismatch") + } + + n := len(keys) + if n == 0 { + return Metadata{} + } + + md := Metadata{ + keys: make([]string, n), + values: make([]string, n), + } + copy(md.keys, keys) + copy(md.values, values) + return md +} + +func MetadataFrom(kv map[string]string) Metadata { + md := Metadata{ + keys: make([]string, 0, len(kv)), + values: make([]string, 0, len(kv)), + } + for k := range kv { + md.keys = append(md.keys, k) + } + sort.Strings(md.keys) + for _, k := range md.keys { + md.values = append(md.values, kv[k]) + } + return md +} + +func (md Metadata) Len() int { return len(md.keys) } +func (md Metadata) Keys() []string { return md.keys } +func (md Metadata) Values() []string { return md.values } + +func (md Metadata) String() string { + o := new(strings.Builder) + fmt.Fprintf(o, "[") + for i := range md.keys { + if i > 0 { + fmt.Fprintf(o, ", ") + } + fmt.Fprintf(o, "%q: %q", md.keys[i], md.values[i]) + } + fmt.Fprintf(o, "]") + return o.String() +} + +// FindKey returns the index of the key-value pair with the provided key name, +// or -1 if such a key does not exist. +func (md Metadata) FindKey(k string) int { + for i, v := range md.keys { + if v == k { + return i + } + } + return -1 +} + +func (md Metadata) clone() Metadata { + if len(md.keys) == 0 { + return Metadata{} + } + + o := Metadata{ + keys: make([]string, len(md.keys)), + values: make([]string, len(md.values)), + } + copy(o.keys, md.keys) + copy(o.values, md.values) + + return o +} + +// Schema is a sequence of Field values, describing the columns of a table or +// a record batch. +type Schema struct { + fields []Field + index map[string][]int + meta Metadata +} + +// NewSchema returns a new Schema value from the slice of fields and metadata. +// +// NewSchema panics if there is a field with an invalid DataType. 
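+//
+// A short usage sketch (Field, PrimitiveTypes, and BinaryTypes are defined
+// elsewhere in this package; the field names here are illustrative):
+//
+//	md := MetadataFrom(map[string]string{"origin": "example"})
+//	sc := NewSchema([]Field{
+//		{Name: "id", Type: PrimitiveTypes.Int64},
+//		{Name: "name", Type: BinaryTypes.String},
+//	}, &md)
+//	_ = sc.HasField("id") // true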
+func NewSchema(fields []Field, metadata *Metadata) *Schema { + sc := &Schema{ + fields: make([]Field, 0, len(fields)), + index: make(map[string][]int, len(fields)), + } + if metadata != nil { + sc.meta = metadata.clone() + } + for i, field := range fields { + if field.Type == nil { + panic("arrow: field with nil DataType") + } + sc.fields = append(sc.fields, field) + sc.index[field.Name] = append(sc.index[field.Name], i) + } + return sc +} + +func (sc *Schema) Metadata() Metadata { return sc.meta } +func (sc *Schema) Fields() []Field { return sc.fields } +func (sc *Schema) Field(i int) Field { return sc.fields[i] } + +func (sc *Schema) FieldsByName(n string) ([]Field, bool) { + indices, ok := sc.index[n] + if !ok { + return nil, ok + } + fields := make([]Field, 0, len(indices)) + for _, v := range indices { + fields = append(fields, sc.fields[v]) + } + return fields, ok +} + +// FieldIndices returns the indices of the named field or nil. +func (sc *Schema) FieldIndices(n string) []int { + return sc.index[n] +} + +func (sc *Schema) HasField(n string) bool { return len(sc.FieldIndices(n)) > 0 } +func (sc *Schema) HasMetadata() bool { return len(sc.meta.keys) > 0 } + +// Equal returns whether two schema are equal. +// Equal does not compare the metadata. +func (sc *Schema) Equal(o *Schema) bool { + switch { + case sc == o: + return true + case sc == nil || o == nil: + return false + case len(sc.fields) != len(o.fields): + return false + } + + for i := range sc.fields { + if !sc.fields[i].Equal(o.fields[i]) { + return false + } + } + return true +} + +func (s *Schema) String() string { + o := new(strings.Builder) + fmt.Fprintf(o, "schema:\n fields: %d\n", len(s.Fields())) + for i, f := range s.Fields() { + if i > 0 { + o.WriteString("\n") + } + fmt.Fprintf(o, " - %v", f) + } + if meta := s.Metadata(); meta.Len() > 0 { + fmt.Fprintf(o, "\n metadata: %v", meta) + } + return o.String() +} diff --git a/vendor/github.com/apache/arrow/go/arrow/type_string.go b/vendor/github.com/apache/arrow/go/arrow/type_string.go new file mode 100644 index 000000000000..d81cb7b5b1f8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/type_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package arrow + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[NULL-0] + _ = x[BOOL-1] + _ = x[UINT8-2] + _ = x[INT8-3] + _ = x[UINT16-4] + _ = x[INT16-5] + _ = x[UINT32-6] + _ = x[INT32-7] + _ = x[UINT64-8] + _ = x[INT64-9] + _ = x[FLOAT16-10] + _ = x[FLOAT32-11] + _ = x[FLOAT64-12] + _ = x[STRING-13] + _ = x[BINARY-14] + _ = x[FIXED_SIZE_BINARY-15] + _ = x[DATE32-16] + _ = x[DATE64-17] + _ = x[TIMESTAMP-18] + _ = x[TIME32-19] + _ = x[TIME64-20] + _ = x[INTERVAL-21] + _ = x[DECIMAL-22] + _ = x[LIST-23] + _ = x[STRUCT-24] + _ = x[UNION-25] + _ = x[DICTIONARY-26] + _ = x[MAP-27] + _ = x[EXTENSION-28] + _ = x[FIXED_SIZE_LIST-29] + _ = x[DURATION-30] +} + +const _Type_name = "NULLBOOLUINT8INT8UINT16INT16UINT32INT32UINT64INT64FLOAT16FLOAT32FLOAT64STRINGBINARYFIXED_SIZE_BINARYDATE32DATE64TIMESTAMPTIME32TIME64INTERVALDECIMALLISTSTRUCTUNIONDICTIONARYMAPEXTENSIONFIXED_SIZE_LISTDURATION" + +var _Type_index = [...]uint8{0, 4, 8, 13, 17, 23, 28, 34, 39, 45, 50, 57, 64, 71, 77, 83, 100, 106, 112, 121, 127, 133, 141, 148, 152, 158, 163, 173, 176, 185, 200, 208} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/github.com/apache/arrow/go/arrow/type_traits_boolean.go b/vendor/github.com/apache/arrow/go/arrow/type_traits_boolean.go new file mode 100644 index 000000000000..a3a5c59257b1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/type_traits_boolean.go @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "github.com/apache/arrow/go/arrow/bitutil" +) + +type booleanTraits struct{} + +var BooleanTraits booleanTraits + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (booleanTraits) BytesRequired(n int) int { return bitutil.CeilByte(n) / 8 } diff --git a/vendor/github.com/apache/arrow/go/arrow/type_traits_decimal128.go b/vendor/github.com/apache/arrow/go/arrow/type_traits_decimal128.go new file mode 100644 index 000000000000..debe6c7a47d4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/type_traits_decimal128.go @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+	"encoding/binary"
+	"reflect"
+	"unsafe"
+
+	"github.com/apache/arrow/go/arrow/decimal128"
+)
+
+// Decimal128 traits
+var Decimal128Traits decimal128Traits
+
+const (
+	// Decimal128SizeBytes specifies the number of bytes required to store a single decimal128 in memory
+	Decimal128SizeBytes = int(unsafe.Sizeof(decimal128.Num{}))
+)
+
+type decimal128Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (decimal128Traits) BytesRequired(n int) int { return Decimal128SizeBytes * n }
+
+// PutValue
+func (decimal128Traits) PutValue(b []byte, v decimal128.Num) {
+	binary.LittleEndian.PutUint64(b[:8], uint64(v.LowBits()))
+	binary.LittleEndian.PutUint64(b[8:], uint64(v.HighBits()))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type decimal128.Num.
+//
+// NOTE: len(b) must be a multiple of Decimal128SizeBytes.
+func (decimal128Traits) CastFromBytes(b []byte) []decimal128.Num {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+	var res []decimal128.Num
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+	s.Data = h.Data
+	s.Len = h.Len / Decimal128SizeBytes
+	s.Cap = h.Cap / Decimal128SizeBytes
+
+	return res
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (decimal128Traits) CastToBytes(b []decimal128.Num) []byte {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+	var res []byte
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+	s.Data = h.Data
+	s.Len = h.Len * Decimal128SizeBytes
+	s.Cap = h.Cap * Decimal128SizeBytes
+
+	return res
+}
+
+// Copy copies src to dst.
+func (decimal128Traits) Copy(dst, src []decimal128.Num) { copy(dst, src) }
diff --git a/vendor/github.com/apache/arrow/go/arrow/type_traits_float16.go b/vendor/github.com/apache/arrow/go/arrow/type_traits_float16.go
new file mode 100644
index 000000000000..f21ea4d32367
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/type_traits_float16.go
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+	"encoding/binary"
+	"reflect"
+	"unsafe"
+
+	"github.com/apache/arrow/go/arrow/float16"
+)
+
+// Float16 traits
+var Float16Traits float16Traits
+
+const (
+	// Float16SizeBytes specifies the number of bytes required to store a single float16 in memory
+	Float16SizeBytes = int(unsafe.Sizeof(uint16(0)))
+)
+
+type float16Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (float16Traits) BytesRequired(n int) int { return Float16SizeBytes * n }
+
+// PutValue
+func (float16Traits) PutValue(b []byte, v float16.Num) {
+	binary.LittleEndian.PutUint16(b, uint16(v.Uint16()))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type float16.Num.
+//
+// NOTE: len(b) must be a multiple of Float16SizeBytes.
+func (float16Traits) CastFromBytes(b []byte) []float16.Num {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+	var res []float16.Num
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+	s.Data = h.Data
+	s.Len = h.Len / Float16SizeBytes
+	s.Cap = h.Cap / Float16SizeBytes
+
+	return res
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (float16Traits) CastToBytes(b []float16.Num) []byte {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+	var res []byte
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+	s.Data = h.Data
+	s.Len = h.Len * Float16SizeBytes
+	s.Cap = h.Cap * Float16SizeBytes
+
+	return res
+}
+
+// Copy copies src to dst.
+func (float16Traits) Copy(dst, src []float16.Num) { copy(dst, src) }
diff --git a/vendor/github.com/apache/arrow/go/arrow/type_traits_interval.go b/vendor/github.com/apache/arrow/go/arrow/type_traits_interval.go
new file mode 100644
index 000000000000..fcff1e6fd67b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/arrow/type_traits_interval.go
@@ -0,0 +1,125 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+	"encoding/binary"
+	"reflect"
+	"unsafe"
+)
+
+var (
+	MonthIntervalTraits   monthTraits
+	DayTimeIntervalTraits daytimeTraits
+)
+
+// MonthInterval traits
+
+const (
+	// MonthIntervalSizeBytes specifies the number of bytes required to store a single MonthInterval in memory
+	MonthIntervalSizeBytes = int(unsafe.Sizeof(MonthInterval(0)))
+)
+
+type monthTraits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (monthTraits) BytesRequired(n int) int { return MonthIntervalSizeBytes * n }
+
+// PutValue
+func (monthTraits) PutValue(b []byte, v MonthInterval) {
+	binary.LittleEndian.PutUint32(b, uint32(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type MonthInterval.
+//
+// NOTE: len(b) must be a multiple of MonthIntervalSizeBytes.
+func (monthTraits) CastFromBytes(b []byte) []MonthInterval { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []MonthInterval + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / MonthIntervalSizeBytes + s.Cap = h.Cap / MonthIntervalSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (monthTraits) CastToBytes(b []MonthInterval) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * MonthIntervalSizeBytes + s.Cap = h.Cap * MonthIntervalSizeBytes + + return res +} + +// Copy copies src to dst. +func (monthTraits) Copy(dst, src []MonthInterval) { copy(dst, src) } + +// DayTimeInterval traits + +const ( + // DayTimeIntervalSizeBytes specifies the number of bytes required to store a single DayTimeInterval in memory + DayTimeIntervalSizeBytes = int(unsafe.Sizeof(DayTimeInterval{})) +) + +type daytimeTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (daytimeTraits) BytesRequired(n int) int { return DayTimeIntervalSizeBytes * n } + +// PutValue +func (daytimeTraits) PutValue(b []byte, v DayTimeInterval) { + binary.LittleEndian.PutUint32(b[0:4], uint32(v.Days)) + binary.LittleEndian.PutUint32(b[4:8], uint32(v.Milliseconds)) +} + +// CastFromBytes reinterprets the slice b to a slice of type DayTimeInterval. +// +// NOTE: len(b) must be a multiple of DayTimeIntervalSizeBytes. +func (daytimeTraits) CastFromBytes(b []byte) []DayTimeInterval { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []DayTimeInterval + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / DayTimeIntervalSizeBytes + s.Cap = h.Cap / DayTimeIntervalSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (daytimeTraits) CastToBytes(b []DayTimeInterval) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * DayTimeIntervalSizeBytes + s.Cap = h.Cap * DayTimeIntervalSizeBytes + + return res +} + +// Copy copies src to dst. +func (daytimeTraits) Copy(dst, src []DayTimeInterval) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen.go b/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen.go new file mode 100644 index 000000000000..f98f4947e25e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen.go @@ -0,0 +1,813 @@ +// Code generated by type_traits_numeric.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
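Every `*Traits` value in this package follows the same contract: `BytesRequired` and `PutValue` handle little-endian serialization, while `CastFromBytes`/`CastToBytes` reinterpret the same backing array through `reflect.SliceHeader` without copying. A round-trip sketch using the interval traits defined above (`MonthInterval` is declared elsewhere in the arrow package):

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/arrow"
)

func main() {
	vals := []arrow.MonthInterval{1, 2, 3}

	// Zero-copy byte view of the same backing array
	// (3 * MonthIntervalSizeBytes = 12 bytes).
	b := arrow.MonthIntervalTraits.CastToBytes(vals)

	// Reinterpret the bytes back; no data is copied in either direction.
	back := arrow.MonthIntervalTraits.CastFromBytes(b)
	fmt.Println(len(b), back) // 12 [1 2 3]
}
```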
+ +package arrow // import "github.com/apache/arrow/go/arrow" + +import ( + "encoding/binary" + "math" + "reflect" + "unsafe" +) + +var ( + Int64Traits int64Traits + Uint64Traits uint64Traits + Float64Traits float64Traits + Int32Traits int32Traits + Uint32Traits uint32Traits + Float32Traits float32Traits + Int16Traits int16Traits + Uint16Traits uint16Traits + Int8Traits int8Traits + Uint8Traits uint8Traits + TimestampTraits timestampTraits + Time32Traits time32Traits + Time64Traits time64Traits + Date32Traits date32Traits + Date64Traits date64Traits + DurationTraits durationTraits +) + +// Int64 traits + +const ( + // Int64SizeBytes specifies the number of bytes required to store a single int64 in memory + Int64SizeBytes = int(unsafe.Sizeof(int64(0))) +) + +type int64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (int64Traits) BytesRequired(n int) int { return Int64SizeBytes * n } + +// PutValue +func (int64Traits) PutValue(b []byte, v int64) { + binary.LittleEndian.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type int64. +// +// NOTE: len(b) must be a multiple of Int64SizeBytes. +func (int64Traits) CastFromBytes(b []byte) []int64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []int64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Int64SizeBytes + s.Cap = h.Cap / Int64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (int64Traits) CastToBytes(b []int64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Int64SizeBytes + s.Cap = h.Cap * Int64SizeBytes + + return res +} + +// Copy copies src to dst. +func (int64Traits) Copy(dst, src []int64) { copy(dst, src) } + +// Uint64 traits + +const ( + // Uint64SizeBytes specifies the number of bytes required to store a single uint64 in memory + Uint64SizeBytes = int(unsafe.Sizeof(uint64(0))) +) + +type uint64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (uint64Traits) BytesRequired(n int) int { return Uint64SizeBytes * n } + +// PutValue +func (uint64Traits) PutValue(b []byte, v uint64) { + binary.LittleEndian.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint64. +// +// NOTE: len(b) must be a multiple of Uint64SizeBytes. +func (uint64Traits) CastFromBytes(b []byte) []uint64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Uint64SizeBytes + s.Cap = h.Cap / Uint64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (uint64Traits) CastToBytes(b []uint64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Uint64SizeBytes + s.Cap = h.Cap * Uint64SizeBytes + + return res +} + +// Copy copies src to dst. +func (uint64Traits) Copy(dst, src []uint64) { copy(dst, src) } + +// Float64 traits + +const ( + // Float64SizeBytes specifies the number of bytes required to store a single float64 in memory + Float64SizeBytes = int(unsafe.Sizeof(float64(0))) +) + +type float64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. 
+func (float64Traits) BytesRequired(n int) int { return Float64SizeBytes * n } + +// PutValue +func (float64Traits) PutValue(b []byte, v float64) { + binary.LittleEndian.PutUint64(b, math.Float64bits(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type float64. +// +// NOTE: len(b) must be a multiple of Float64SizeBytes. +func (float64Traits) CastFromBytes(b []byte) []float64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []float64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Float64SizeBytes + s.Cap = h.Cap / Float64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (float64Traits) CastToBytes(b []float64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Float64SizeBytes + s.Cap = h.Cap * Float64SizeBytes + + return res +} + +// Copy copies src to dst. +func (float64Traits) Copy(dst, src []float64) { copy(dst, src) } + +// Int32 traits + +const ( + // Int32SizeBytes specifies the number of bytes required to store a single int32 in memory + Int32SizeBytes = int(unsafe.Sizeof(int32(0))) +) + +type int32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (int32Traits) BytesRequired(n int) int { return Int32SizeBytes * n } + +// PutValue +func (int32Traits) PutValue(b []byte, v int32) { + binary.LittleEndian.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type int32. +// +// NOTE: len(b) must be a multiple of Int32SizeBytes. +func (int32Traits) CastFromBytes(b []byte) []int32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []int32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Int32SizeBytes + s.Cap = h.Cap / Int32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (int32Traits) CastToBytes(b []int32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Int32SizeBytes + s.Cap = h.Cap * Int32SizeBytes + + return res +} + +// Copy copies src to dst. +func (int32Traits) Copy(dst, src []int32) { copy(dst, src) } + +// Uint32 traits + +const ( + // Uint32SizeBytes specifies the number of bytes required to store a single uint32 in memory + Uint32SizeBytes = int(unsafe.Sizeof(uint32(0))) +) + +type uint32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (uint32Traits) BytesRequired(n int) int { return Uint32SizeBytes * n } + +// PutValue +func (uint32Traits) PutValue(b []byte, v uint32) { + binary.LittleEndian.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint32. +// +// NOTE: len(b) must be a multiple of Uint32SizeBytes. +func (uint32Traits) CastFromBytes(b []byte) []uint32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Uint32SizeBytes + s.Cap = h.Cap / Uint32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. 
+func (uint32Traits) CastToBytes(b []uint32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Uint32SizeBytes + s.Cap = h.Cap * Uint32SizeBytes + + return res +} + +// Copy copies src to dst. +func (uint32Traits) Copy(dst, src []uint32) { copy(dst, src) } + +// Float32 traits + +const ( + // Float32SizeBytes specifies the number of bytes required to store a single float32 in memory + Float32SizeBytes = int(unsafe.Sizeof(float32(0))) +) + +type float32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (float32Traits) BytesRequired(n int) int { return Float32SizeBytes * n } + +// PutValue +func (float32Traits) PutValue(b []byte, v float32) { + binary.LittleEndian.PutUint32(b, math.Float32bits(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type float32. +// +// NOTE: len(b) must be a multiple of Float32SizeBytes. +func (float32Traits) CastFromBytes(b []byte) []float32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []float32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Float32SizeBytes + s.Cap = h.Cap / Float32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (float32Traits) CastToBytes(b []float32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Float32SizeBytes + s.Cap = h.Cap * Float32SizeBytes + + return res +} + +// Copy copies src to dst. +func (float32Traits) Copy(dst, src []float32) { copy(dst, src) } + +// Int16 traits + +const ( + // Int16SizeBytes specifies the number of bytes required to store a single int16 in memory + Int16SizeBytes = int(unsafe.Sizeof(int16(0))) +) + +type int16Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (int16Traits) BytesRequired(n int) int { return Int16SizeBytes * n } + +// PutValue +func (int16Traits) PutValue(b []byte, v int16) { + binary.LittleEndian.PutUint16(b, uint16(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type int16. +// +// NOTE: len(b) must be a multiple of Int16SizeBytes. +func (int16Traits) CastFromBytes(b []byte) []int16 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []int16 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Int16SizeBytes + s.Cap = h.Cap / Int16SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (int16Traits) CastToBytes(b []int16) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Int16SizeBytes + s.Cap = h.Cap * Int16SizeBytes + + return res +} + +// Copy copies src to dst. +func (int16Traits) Copy(dst, src []int16) { copy(dst, src) } + +// Uint16 traits + +const ( + // Uint16SizeBytes specifies the number of bytes required to store a single uint16 in memory + Uint16SizeBytes = int(unsafe.Sizeof(uint16(0))) +) + +type uint16Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. 
+func (uint16Traits) BytesRequired(n int) int { return Uint16SizeBytes * n } + +// PutValue +func (uint16Traits) PutValue(b []byte, v uint16) { + binary.LittleEndian.PutUint16(b, uint16(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint16. +// +// NOTE: len(b) must be a multiple of Uint16SizeBytes. +func (uint16Traits) CastFromBytes(b []byte) []uint16 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint16 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Uint16SizeBytes + s.Cap = h.Cap / Uint16SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (uint16Traits) CastToBytes(b []uint16) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Uint16SizeBytes + s.Cap = h.Cap * Uint16SizeBytes + + return res +} + +// Copy copies src to dst. +func (uint16Traits) Copy(dst, src []uint16) { copy(dst, src) } + +// Int8 traits + +const ( + // Int8SizeBytes specifies the number of bytes required to store a single int8 in memory + Int8SizeBytes = int(unsafe.Sizeof(int8(0))) +) + +type int8Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (int8Traits) BytesRequired(n int) int { return Int8SizeBytes * n } + +// PutValue +func (int8Traits) PutValue(b []byte, v int8) { + b[0] = byte(v) +} + +// CastFromBytes reinterprets the slice b to a slice of type int8. +// +// NOTE: len(b) must be a multiple of Int8SizeBytes. +func (int8Traits) CastFromBytes(b []byte) []int8 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []int8 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Int8SizeBytes + s.Cap = h.Cap / Int8SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (int8Traits) CastToBytes(b []int8) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Int8SizeBytes + s.Cap = h.Cap * Int8SizeBytes + + return res +} + +// Copy copies src to dst. +func (int8Traits) Copy(dst, src []int8) { copy(dst, src) } + +// Uint8 traits + +const ( + // Uint8SizeBytes specifies the number of bytes required to store a single uint8 in memory + Uint8SizeBytes = int(unsafe.Sizeof(uint8(0))) +) + +type uint8Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (uint8Traits) BytesRequired(n int) int { return Uint8SizeBytes * n } + +// PutValue +func (uint8Traits) PutValue(b []byte, v uint8) { + b[0] = byte(v) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint8. +// +// NOTE: len(b) must be a multiple of Uint8SizeBytes. +func (uint8Traits) CastFromBytes(b []byte) []uint8 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint8 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Uint8SizeBytes + s.Cap = h.Cap / Uint8SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (uint8Traits) CastToBytes(b []uint8) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Uint8SizeBytes + s.Cap = h.Cap * Uint8SizeBytes + + return res +} + +// Copy copies src to dst. 
+func (uint8Traits) Copy(dst, src []uint8) { copy(dst, src) } + +// Timestamp traits + +const ( + // TimestampSizeBytes specifies the number of bytes required to store a single Timestamp in memory + TimestampSizeBytes = int(unsafe.Sizeof(Timestamp(0))) +) + +type timestampTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (timestampTraits) BytesRequired(n int) int { return TimestampSizeBytes * n } + +// PutValue +func (timestampTraits) PutValue(b []byte, v Timestamp) { + binary.LittleEndian.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Timestamp. +// +// NOTE: len(b) must be a multiple of TimestampSizeBytes. +func (timestampTraits) CastFromBytes(b []byte) []Timestamp { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Timestamp + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / TimestampSizeBytes + s.Cap = h.Cap / TimestampSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (timestampTraits) CastToBytes(b []Timestamp) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * TimestampSizeBytes + s.Cap = h.Cap * TimestampSizeBytes + + return res +} + +// Copy copies src to dst. +func (timestampTraits) Copy(dst, src []Timestamp) { copy(dst, src) } + +// Time32 traits + +const ( + // Time32SizeBytes specifies the number of bytes required to store a single Time32 in memory + Time32SizeBytes = int(unsafe.Sizeof(Time32(0))) +) + +type time32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (time32Traits) BytesRequired(n int) int { return Time32SizeBytes * n } + +// PutValue +func (time32Traits) PutValue(b []byte, v Time32) { + binary.LittleEndian.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Time32. +// +// NOTE: len(b) must be a multiple of Time32SizeBytes. +func (time32Traits) CastFromBytes(b []byte) []Time32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Time32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Time32SizeBytes + s.Cap = h.Cap / Time32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (time32Traits) CastToBytes(b []Time32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Time32SizeBytes + s.Cap = h.Cap * Time32SizeBytes + + return res +} + +// Copy copies src to dst. +func (time32Traits) Copy(dst, src []Time32) { copy(dst, src) } + +// Time64 traits + +const ( + // Time64SizeBytes specifies the number of bytes required to store a single Time64 in memory + Time64SizeBytes = int(unsafe.Sizeof(Time64(0))) +) + +type time64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (time64Traits) BytesRequired(n int) int { return Time64SizeBytes * n } + +// PutValue +func (time64Traits) PutValue(b []byte, v Time64) { + binary.LittleEndian.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Time64. +// +// NOTE: len(b) must be a multiple of Time64SizeBytes. 
+func (time64Traits) CastFromBytes(b []byte) []Time64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Time64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Time64SizeBytes + s.Cap = h.Cap / Time64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (time64Traits) CastToBytes(b []Time64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Time64SizeBytes + s.Cap = h.Cap * Time64SizeBytes + + return res +} + +// Copy copies src to dst. +func (time64Traits) Copy(dst, src []Time64) { copy(dst, src) } + +// Date32 traits + +const ( + // Date32SizeBytes specifies the number of bytes required to store a single Date32 in memory + Date32SizeBytes = int(unsafe.Sizeof(Date32(0))) +) + +type date32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (date32Traits) BytesRequired(n int) int { return Date32SizeBytes * n } + +// PutValue +func (date32Traits) PutValue(b []byte, v Date32) { + binary.LittleEndian.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Date32. +// +// NOTE: len(b) must be a multiple of Date32SizeBytes. +func (date32Traits) CastFromBytes(b []byte) []Date32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Date32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Date32SizeBytes + s.Cap = h.Cap / Date32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (date32Traits) CastToBytes(b []Date32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Date32SizeBytes + s.Cap = h.Cap * Date32SizeBytes + + return res +} + +// Copy copies src to dst. +func (date32Traits) Copy(dst, src []Date32) { copy(dst, src) } + +// Date64 traits + +const ( + // Date64SizeBytes specifies the number of bytes required to store a single Date64 in memory + Date64SizeBytes = int(unsafe.Sizeof(Date64(0))) +) + +type date64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (date64Traits) BytesRequired(n int) int { return Date64SizeBytes * n } + +// PutValue +func (date64Traits) PutValue(b []byte, v Date64) { + binary.LittleEndian.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Date64. +// +// NOTE: len(b) must be a multiple of Date64SizeBytes. +func (date64Traits) CastFromBytes(b []byte) []Date64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Date64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Date64SizeBytes + s.Cap = h.Cap / Date64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (date64Traits) CastToBytes(b []Date64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Date64SizeBytes + s.Cap = h.Cap * Date64SizeBytes + + return res +} + +// Copy copies src to dst. 
+func (date64Traits) Copy(dst, src []Date64) { copy(dst, src) } + +// Duration traits + +const ( + // DurationSizeBytes specifies the number of bytes required to store a single Duration in memory + DurationSizeBytes = int(unsafe.Sizeof(Duration(0))) +) + +type durationTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (durationTraits) BytesRequired(n int) int { return DurationSizeBytes * n } + +// PutValue +func (durationTraits) PutValue(b []byte, v Duration) { + binary.LittleEndian.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Duration. +// +// NOTE: len(b) must be a multiple of DurationSizeBytes. +func (durationTraits) CastFromBytes(b []byte) []Duration { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Duration + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / DurationSizeBytes + s.Cap = h.Cap / DurationSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (durationTraits) CastToBytes(b []Duration) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * DurationSizeBytes + s.Cap = h.Cap * DurationSizeBytes + + return res +} + +// Copy copies src to dst. +func (durationTraits) Copy(dst, src []Duration) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen.go.tmpl new file mode 100644 index 000000000000..c4a25ee3256f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen.go.tmpl @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow // import "github.com/apache/arrow/go/arrow" + +import ( + "encoding/binary" + "math" + "reflect" + "unsafe" +) + +var ( +{{range .In}} + {{.Name}}Traits {{.name}}Traits +{{- end}} +) + +{{range .In}} +// {{.Name}} traits + +const ( + // {{.Name}}SizeBytes specifies the number of bytes required to store a single {{.Type}} in memory + {{.Name}}SizeBytes = int(unsafe.Sizeof({{.Type}}({{.Default}}))) +) + +type {{.name}}Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. 
+func ({{.name}}Traits) BytesRequired(n int) int { return {{.Name}}SizeBytes * n } + +// PutValue +func ({{.name}}Traits) PutValue(b []byte, v {{.Type}}) { +{{- if eq .Type "float32" -}} + binary.LittleEndian.PutUint32(b, math.Float32bits(v)) +{{- else if eq .Type "float64" -}} + binary.LittleEndian.PutUint64(b, math.Float64bits(v)) +{{- else if eq .Size "1" -}} + b[0] = byte(v) +{{- else if eq .Size "2" -}} + binary.LittleEndian.PutUint16(b, uint16(v)) +{{- else if eq .Size "4" -}} + binary.LittleEndian.PutUint32(b, uint32(v)) +{{- else if eq .Size "8" -}} + binary.LittleEndian.PutUint64(b, uint64(v)) +{{- else -}} + panic("invalid type {{.Type}}") +{{end}} +} + +// CastFromBytes reinterprets the slice b to a slice of type {{.Type}}. +// +// NOTE: len(b) must be a multiple of {{.Name}}SizeBytes. +func ({{.name}}Traits) CastFromBytes(b []byte) []{{.Type}} { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []{{.Type}} + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len/{{.Name}}SizeBytes + s.Cap = h.Cap/{{.Name}}SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func ({{.name}}Traits) CastToBytes(b []{{.Type}}) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len*{{.Name}}SizeBytes + s.Cap = h.Cap*{{.Name}}SizeBytes + + return res +} + +// Copy copies src to dst. +func ({{.name}}Traits) Copy(dst, src []{{.Type}}) { copy(dst, src) } +{{end}} diff --git a/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen_test.go.tmpl new file mode 100644 index 000000000000..5a0e269b5d5b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/arrow/type_traits_numeric.gen_test.go.tmpl @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package arrow_test + +import ( + "reflect" + "testing" + + "github.com/apache/arrow/go/arrow" +) + +{{- range .In}} + +func Test{{.Name}}Traits(t *testing.T) { + const N = 10 + b1 := arrow.{{.Name}}Traits.CastToBytes([]{{or .QualifiedType .Type}}{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + }) + + b2 := make([]byte, arrow.{{.Name}}Traits.BytesRequired(N)) + for i := 0; i < N; i++ { + beg := i * arrow.{{.Name}}SizeBytes + end := (i + 1) * arrow.{{.Name}}SizeBytes + arrow.{{.Name}}Traits.PutValue(b2[beg:end], {{or .QualifiedType .Type}}(i)) + } + + if !reflect.DeepEqual(b1, b2) { + v1 := arrow.{{.Name}}Traits.CastFromBytes(b1) + v2 := arrow.{{.Name}}Traits.CastFromBytes(b2) + t.Fatalf("invalid values:\nb1=%v\nb2=%v\nv1=%v\nv2=%v\n", b1, b2, v1, v2) + } + + v1 := arrow.{{.Name}}Traits.CastFromBytes(b1) + for i, v := range v1 { + if got, want := v, {{or .QualifiedType .Type}}(i); got != want { + t.Fatalf("invalid value[%d]. got=%v, want=%v", i, got, want) + } + } + + v2 := make([]{{or .QualifiedType .Type}}, N) + arrow.{{.Name}}Traits.Copy(v2, v1) + + if !reflect.DeepEqual(v1, v2) { + t.Fatalf("invalid values:\nv1=%v\nv2=%v\n", v1, v2) + } +} +{{end}} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/.gitignore b/vendor/github.com/dgrijalva/jwt-go/v4/.gitignore new file mode 100644 index 000000000000..80bed650ec03 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/v4/.travis.yml new file mode 100644 index 000000000000..ade3c5831968 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/.travis.yml @@ -0,0 +1,11 @@ +language: go + +script: + - go vet ./... + - go test -v ./... + +go: + - "1.11" + - "1.12" + - "1.13" + - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/LICENSE b/vendor/github.com/dgrijalva/jwt-go/v4/LICENSE new file mode 100644 index 000000000000..df83a9c2f019 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/v4/MIGRATION_GUIDE.md new file mode 100644 index 000000000000..343afd8144f3 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/MIGRATION_GUIDE.md @@ -0,0 +1,101 @@ +## Migration Guide from v3 -> v4 + +TODO: write this + +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. 
To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is defined as `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
+
+The old example for parsing a token looked like this:
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is now directly mapped to:
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
+
+```go
+	type MyCustomClaims struct {
+		User string
+		*StandardClaims
+	}
+
+	if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+		claims := token.Claims.(*MyCustomClaims)
+		fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+	}
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
+
+This simple parsing example:
+
+```go
+	if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is directly mapped to:
+
+```go
+	if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+There are several concrete `Extractor` types provided for your convenience:
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header; see the sketch below.
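+
+For instance, here is a hedged sketch that checks a custom header first and falls back to a query argument (it assumes `MultiExtractor`, `HeaderExtractor`, and `ArgumentExtractor` keep the list shapes they have in v3; the header name is illustrative):
+
+```go
+	extractor := request.MultiExtractor{
+		request.HeaderExtractor{"X-Api-Token"},
+		request.ArgumentExtractor{"access_token"},
+	}
+
+	if token, err := request.ParseFromRequest(req, extractor, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v", claims["user"])
+	}
+```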
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go
+	func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
+		}
+
+		// Look up key
+		key, err := lookupPublicKey(token.Header["kid"])
+		if err != nil {
+			return nil, err
+		}
+
+		// Unpack key from PEM encoded PKCS8
+		return jwt.ParseRSAPublicKeyFromPEM(key)
+	}
+```
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/README.md b/vendor/github.com/dgrijalva/jwt-go/v4/README.md
new file mode 100644
index 000000000000..230589bf69a1
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/README.md
@@ -0,0 +1,101 @@
+# jwt-go
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+**NEW VERSION:** Version 4 of this library is now available. This is the first non-backward-compatible version in a long time. There are a few changes that all users will notice, such as the new types introduced in members of `StandardClaims`. More changes are additive or only impact more advanced use. See VERSION_HISTORY.md for a list of changes as well as **TODO** MIGRATION_GUIDE.md for help updating your code.
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature.
For example, which signing method and key were used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+- [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+- [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+- [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+- In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+As of version 4, this project is compatible with Go modules. You should use them to ensure you have no unpleasant surprises when updating.
+
+**BREAKING CHANGES:**
+
+- Version 4.0.0 includes _a lot_ of changes from the 3.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+- The author of the token was in possession of the signing secret
+- The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though the difference is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+- The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
+- The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+- The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+- OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+- OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+- Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
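+
+As a quick orientation, here is a minimal end-to-end sketch using HS256 with a shared `[]byte` key. It assumes the v3-style `NewWithClaims` constructor is unchanged in v4, and uses the new `KnownKeyfunc` helper to pin both the signing method and the key:
+
+```go
+	key := []byte("my-secret")
+
+	// Build and sign a token.
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"user": "bob"})
+	signed, err := token.SignedString(key)
+	if err != nil {
+		// handle the signing error
+	}
+
+	// Parse and validate it.
+	parsed, err := jwt.Parse(signed, jwt.KnownKeyfunc(jwt.SigningMethodHS256, key))
+	if err == nil && parsed.Valid {
+		fmt.Printf("user: %v", parsed.Claims.(jwt.MapClaims)["user"])
+	}
+```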
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/v4/VERSION_HISTORY.md
new file mode 100644
index 000000000000..fffaa37f8777
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/VERSION_HISTORY.md
@@ -0,0 +1,141 @@
+## `jwt-go` Version History
+
+#### 4.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Errors have been updated significantly to take advantage of the changes to errors in go1.13/go2/xerrors
+    * The previous 'enum' describing error types has been replaced with unique types for every kind of error
+    * The new error types carry more information and interoperate properly with errors.As
+  * Dropping (official) support for go1.10 or older. Older versions dating back to 1.4 **may** continue to work, but are not being considered or tested against. 1.3 and previous will no longer work with this library.
+  * Behavior of time values has changed significantly, primarily to handle nil values, but also to be more consistent with Go:
+    * All time values used by the library are expressed using time.Time and time.Duration. This includes automatic parsing and encoding of the JWT unix timestamp format.
+    * `StandardClaims` time values use the new `Time` type, which wraps time.Time to handle nil values gracefully
+  * The method for describing custom parsing/validating behaviors has changed. The properties exposed on Parser have been replaced with `ParserOption`s. See https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis for the theory. See `ParserOption` and `SigningOption` for more details
+  * Per the spec, if the `aud` claim is present, it will be automatically validated. See `WithAudience` and `WithoutAudienceValidation`
+  * The `Valid` method on `Claims` now takes an argument, `ValidationHelper`.
+* Added support for leeway. If you have issues with clock skew between the issuing machine and the consuming machine, you can allow for a grace period. See `WithLeeway`
+* Added support for custom JSON encoders/decoders. See `WithMarshaller` and `WithUnmarshaller`
+* Added support for issuer validation. See `WithIssuer`
+* Updated error messages and comments to comply with linter recommendations
+* Added `KnownKeyfunc` for when you know both the signing method and the key without needing to look at the token Header. This should dramatically simplify many common use cases.
+* Added `ValidationHelper` to make it easier to implement custom Claims types without having to rewrite a bunch of built-in behavior, such as time comparison and leeway. `ValidationHelper` is built with `ParserOption`s and provides the same methods used by built-in claims types to handle validation.
+* Added support for `crypto.Signer` on several signing methods. This was a common request.
+* Added a new type, `ClaimStrings`, which will correctly handle properties such as `aud` that can be either an array of strings or a single string. `ClaimStrings` is defined as `[]string` with custom decoding behavior. This means all the built-in `aud` validation behavior now expects `[]string` instead of `string`. This was a common request.
+
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify the API in the future.
+
+#### 3.1.0
+
+* Improvements to the `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+  * `ParseFromRequest` has been moved to the `request` subpackage and its usage has changed
+  * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is defined as `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+  * Added the `Claims` interface type to allow users to decode the claims into a custom type
+  * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+  * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+  * Added `ParseFromRequestWithClaims`, which is the `FromRequest` equivalent of `ParseWithClaims`
+  * Added a new interface type, `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+  * Added several new, more specific, validation errors to the error type bitmask
+  * Moved examples from the README to executable example files
+  * Signing method registry is now thread safe
+  * Added a new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or the json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards-compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added a new option, `-show`, to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed an incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed the inner error within ValidationError
+* Fixed validation errors when using the UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added a more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added a new type, `Parser`, to allow for configuration of various parsing parameters
+  * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+  * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. The result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+A backwards-compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of a dedicated struct type
+  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of a dedicated struct type
+  * `KeyFunc` now returns `interface{}` instead of `[]byte`
+  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodHS256`
+  * Added public package global `SigningMethodHS384`
+  * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodRS256`
+  * Added public package global `SigningMethodRS384`
+  * Added public package global `SigningMethodRS512`
+* Moved the sample private key for HMAC tests from an inline value to a file on disk. The value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed a bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in the RS256 implementation. No functional changes.
+
+#### 1.0.1
+
+* Fixed a panic if the RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
\ No newline at end of file
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/claim_strings.go b/vendor/github.com/dgrijalva/jwt-go/v4/claim_strings.go
new file mode 100644
index 000000000000..057b684da047
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/claim_strings.go
@@ -0,0 +1,47 @@
+package jwt
+
+import (
+	"encoding/json"
+	"reflect"
+)
+
+// ClaimStrings is used for parsing claim properties that
+// can be either a string or an array of strings
+type ClaimStrings []string
+
+// ParseClaimStrings is used to produce a ClaimStrings value
+// from the various forms it may present during encoding/decoding
+func ParseClaimStrings(value interface{}) (ClaimStrings, error) {
+	switch v := value.(type) {
+	case string:
+		return ClaimStrings{v}, nil
+	case []string:
+		return ClaimStrings(v), nil
+	case []interface{}:
+		result := make(ClaimStrings, 0, len(v))
+		for i, vv := range v {
+			if x, ok := vv.(string); ok {
+				result = append(result, x)
+			} else {
+				return nil, &json.UnsupportedTypeError{Type: reflect.TypeOf(v[i])}
+			}
+		}
+		return result, nil
+	case nil:
+		return nil, nil
+	default:
+		return nil, &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+	}
+}
+
+// UnmarshalJSON implements the json package's Unmarshaler interface
+func (c *ClaimStrings) UnmarshalJSON(data []byte) error {
+	var value interface{}
+	err := json.Unmarshal(data, &value)
+	if err != nil {
+		return err
+	}
+
+	*c, err = ParseClaimStrings(value)
+	return err
+}
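+
+// Illustrative decoding behavior (a hedged sketch; it assumes encoding/json
+// drives UnmarshalJSON, as in the standard library):
+//
+//	var c ClaimStrings
+//	_ = json.Unmarshal([]byte(`"api.example.com"`), &c) // c == ClaimStrings{"api.example.com"}
+//	_ = json.Unmarshal([]byte(`["a", "b"]`), &c)        // c == ClaimStrings{"a", "b"}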
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/claims.go b/vendor/github.com/dgrijalva/jwt-go/v4/claims.go
new file mode 100644
index 000000000000..a065328bc2b6
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/claims.go
@@ -0,0 +1,64 @@
+package jwt
+
+// Claims is the interface used to hold the claims values of a token.
+// For a type to be a Claims object, it must have a Valid method that determines
+// if the token is invalid for any supported reason.
+// Claims are parsed and encoded using the standard library's encoding/json
+// package. Claims are passed directly to that.
+type Claims interface {
+	// A nil validation helper should use the default helper
+	Valid(*ValidationHelper) error
+}
+
+// StandardClaims is a structured version of the Claims section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types
+type StandardClaims struct {
+	Audience  ClaimStrings `json:"aud,omitempty"`
+	ExpiresAt *Time        `json:"exp,omitempty"`
+	ID        string       `json:"jti,omitempty"`
+	IssuedAt  *Time        `json:"iat,omitempty"`
+	Issuer    string       `json:"iss,omitempty"`
+	NotBefore *Time        `json:"nbf,omitempty"`
+	Subject   string       `json:"sub,omitempty"`
+}
+
+// Valid validates standard claims using a ValidationHelper.
+// Validates the time-based claims "exp" and "nbf" (see: WithLeeway).
+// Validates "aud" if present in claims (see: WithAudience, WithoutAudienceValidation).
+// Validates "iss" if the option is provided (see: WithIssuer).
+func (c StandardClaims) Valid(h *ValidationHelper) error {
+	var vErr error
+
+	if h == nil {
+		h = DefaultValidationHelper
+	}
+
+	if err := h.ValidateExpiresAt(c.ExpiresAt); err != nil {
+		vErr = wrapError(err, vErr)
+	}
+
+	if err := h.ValidateNotBefore(c.NotBefore); err != nil {
+		vErr = wrapError(err, vErr)
+	}
+
+	if err := h.ValidateAudience(c.Audience); err != nil {
+		vErr = wrapError(err, vErr)
+	}
+
+	if err := h.ValidateIssuer(c.Issuer); err != nil {
+		vErr = wrapError(err, vErr)
+	}
+
+	return vErr
+}
+
+// VerifyAudience compares the aud claim against cmp.
+func (c *StandardClaims) VerifyAudience(h *ValidationHelper, cmp string) error {
+	return h.ValidateAudienceAgainst(c.Audience, cmp)
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+func (c *StandardClaims) VerifyIssuer(h *ValidationHelper, cmp string) error {
+	return h.ValidateIssuerAgainst(c.Issuer, cmp)
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/doc.go b/vendor/github.com/dgrijalva/jwt-go/v4/doc.go
new file mode 100644
index 000000000000..a86dc1a3b348
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa.go
new file mode 100644
index 000000000000..9a0d6083453a
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa.go
@@ -0,0 +1,173 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"encoding/asn1"
+	"fmt"
+	"math/big"
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+	Name      string
+	Hash      crypto.Hash
+	KeySize   int
+	CurveBits int
+}
+
+// ecdsaSignature mirrors the struct from crypto/ecdsa; we expect the
+// ecdsa.PrivateKey.Sign function to return this struct, ASN.1 encoded
+type ecdsaSignature struct {
+	R, S *big.Int
+}
+
+// Specific instances for ES256 and company
+var (
+	SigningMethodES256 *SigningMethodECDSA
+	SigningMethodES384 *SigningMethodECDSA
+	SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+	// ES256
+	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+		return SigningMethodES256
+	})
+
+	// ES384
+	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+		return SigningMethodES384
+	})
+
+	// ES512
+	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+		return SigningMethodES512
+	})
+}
+
+// Alg implements SigningMethod
+func (m *SigningMethodECDSA) Alg() string {
+	return m.Name
+}
+
+// Verify implements the Verify method from SigningMethod.
+// For this verify method, key must be an *ecdsa.PublicKey or a crypto.Signer
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Get the key
+	var ecdsaKey *ecdsa.PublicKey
+	var ok bool
+
+	switch k := key.(type) {
	case *ecdsa.PublicKey:
+		ecdsaKey = k
+	case crypto.Signer:
+		pub := k.Public()
+		if ecdsaKey, ok = pub.(*ecdsa.PublicKey); !ok {
+			return &InvalidKeyError{Message: fmt.Sprintf("crypto.Signer returned an unexpected public key type: %T", pub)}
+		}
+	default:
+		return NewInvalidKeyTypeError("*ecdsa.PublicKey or crypto.Signer", key)
+	}
+
+	if len(sig) != 2*m.KeySize {
+		return &UnverfiableTokenError{Message: "signature length is invalid"}
+	}
+
+	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+	s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	if ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s) {
+		return nil
+	}
+	return new(InvalidSignatureError)
+}
+
+// Sign implements the Sign method from SigningMethod.
+// For this signing method, key must be an *ecdsa.PrivateKey or another
+// crypto.Signer whose public key is an *ecdsa.PublicKey
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+	var signer crypto.Signer
+	var pub *ecdsa.PublicKey
+	var ok bool
+
+	if signer, ok = key.(crypto.Signer); !ok {
+		return "", NewInvalidKeyTypeError("*ecdsa.PrivateKey or crypto.Signer", key)
+	}
+
+	// Sanity check that the signer is an ECDSA signer
+	if pub, ok = signer.Public().(*ecdsa.PublicKey); !ok {
+		return "", &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", pub)}
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return r, s
+	asn1Sig, err := signer.Sign(rand.Reader, hasher.Sum(nil), m.Hash)
+	if err != nil {
+		return "", err
+	}
+
+	// The ecdsa.PrivateKey Sign function returns an ASN.1-encoded signature,
+	// which is not what we want: we unmarshal it to get r and s, then encode
+	// them as described in RFC 7518 section 3.4
+	var ecdsaSig ecdsaSignature
+	rest, err := asn1.Unmarshal(asn1Sig, &ecdsaSig)
+	if err != nil {
+		return "", err
+	}
+
+	if len(rest) != 0 {
+		return "", &UnverfiableTokenError{Message: "unexpected extra bytes in ecdsa signature"}
+	}
+
+	curveBits := pub.Curve.Params().BitSize
+
+	if m.CurveBits != curveBits {
+		return "", &InvalidKeyError{Message: "CurveBits in public key don't match those in signing method"}
+	}
+
+	keyBytes := curveBits / 8
+	if curveBits%8 > 0 {
+		keyBytes++
+	}
+
+	// We serialize the output (r and s) into big-endian byte arrays and pad
+	// them with zeros on the left to make sure the sizes work out. Both arrays
+	// must be keyBytes long, and the output must be 2*keyBytes long.
+	rBytes := ecdsaSig.R.Bytes()
+	rBytesPadded := make([]byte, keyBytes)
+	copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+	sBytes := ecdsaSig.S.Bytes()
+	sBytesPadded := make([]byte, keyBytes)
+	copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+	out := append(rBytesPadded, sBytesPadded...)
+
+	return EncodeSegment(out), nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa_utils.go
new file mode 100644
index 000000000000..627e7db3cf34
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/ecdsa_utils.go
@@ -0,0 +1,70 @@
+package jwt
+
+import (
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+// Errors returned by EC signing methods
+var (
+	ErrNotECPublicKey  = errors.New("key is not a valid ECDSA public key")
+	ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM is a helper function for
+// parsing a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+		return nil, err
+	}
+
+	var pkey *ecdsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+		return nil, ErrNotECPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM is a helper function for parsing a PEM encoded
+// public key (PKIX, or the public key of an X.509 certificate)
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *ecdsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+		return nil, ErrNotECPublicKey
+	}
+
+	return pkey, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/errors.go b/vendor/github.com/dgrijalva/jwt-go/v4/errors.go
new file mode 100644
index 000000000000..d7a53ba7f7fa
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/errors.go
@@ -0,0 +1,204 @@
+package jwt
+
+import (
+	"fmt"
+	"time"
+)
+
+// Error constants
+var (
+	ErrHashUnavailable = new(HashUnavailableError)
+)
+
+// wrapError embeds b within a, if a is a valid wrapper, and returns a.
+// If a is not a valid wrapper, b is dropped.
+// If one of the errors is nil, the other is returned.
+func wrapError(a, b error) error {
+	if b == nil {
+		return a
+	}
+	if a == nil {
+		return b
+	}
+
+	type iErrorWrapper interface {
+		Wrap(error)
+	}
+	if w, ok := a.(iErrorWrapper); ok {
+		w.Wrap(b)
+	}
+	return a
+}
+
+// ErrorWrapper provides a simple, concrete helper for implementing nestable errors
type ErrorWrapper struct{ err error }
+
+// Unwrap implements xerrors.Wrapper
+func (w ErrorWrapper) Unwrap() error {
+	return w.err
+}
+
+// Wrap stores the provided error value and returns it when Unwrap is called
+func (w *ErrorWrapper) Wrap(err error) {
+	w.err = err
+}
+
+// InvalidKeyError is returned if the key is unusable for some reason other than type
+type InvalidKeyError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidKeyError) Error() string {
+	return fmt.Sprintf("key is invalid: %v", e.Message)
+}
+
+// InvalidKeyTypeError is returned if the key is unusable because it is of an incompatible type
+type InvalidKeyTypeError struct {
+	Expected, Received string // String descriptions of expected and received types
+	ErrorWrapper
+}
+
+func (e *InvalidKeyTypeError) Error() string {
+	if e.Expected == "" && e.Received == "" {
+		return "key is of invalid type"
+	}
+	return fmt.Sprintf("key is of invalid type: expected %v, received %v", e.Expected, e.Received)
+}
+
+// NewInvalidKeyTypeError creates an InvalidKeyTypeError, automatically capturing the type
+// of received
+func NewInvalidKeyTypeError(expected string, received interface{}) error {
+	return &InvalidKeyTypeError{Expected: expected, Received: fmt.Sprintf("%T", received)}
+}
+
+// MalformedTokenError means the token failed to parse or exhibits some other
+// non-standard property that prevents it being processed by this library
+type MalformedTokenError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *MalformedTokenError) Error() string {
+	if e.Message == "" {
+		return "token is malformed"
+	}
+	return fmt.Sprintf("token is malformed: %v", e.Message)
+}
+
+// UnverfiableTokenError means there's something wrong with the signature that prevents
+// this library from verifying it.
+type UnverfiableTokenError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *UnverfiableTokenError) Error() string {
+	if e.Message == "" {
+		return "token is unverifiable"
+	}
+	return fmt.Sprintf("token is unverifiable: %v", e.Message)
+}
+
+// InvalidSignatureError means the signature on the token is invalid
+type InvalidSignatureError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidSignatureError) Error() string {
+	if e.Message == "" {
+		return "token signature is invalid"
+	}
+	return fmt.Sprintf("token signature is invalid: %v", e.Message)
+}
+
+// TokenExpiredError allows the caller to know how far past the expiry time the
+// token was when it was evaluated. A client system may have a bug that doesn't
+// refresh a token in time, or there may be clock skew, so this information can
+// help you diagnose the failure.
+type TokenExpiredError struct {
+	At        time.Time     // The time at which the exp was evaluated. Includes leeway.
+	ExpiredBy time.Duration // How long the token had been expired at time of evaluation
+	ErrorWrapper                // Value for unwrapping
+}
+
+func (e *TokenExpiredError) Error() string {
+	return fmt.Sprintf("token is expired by %v", e.ExpiredBy)
+}
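+
+// A hedged usage sketch (it assumes the standard library errors package; the
+// parse call and logging are illustrative):
+//
+//	_, err := parser.Parse(tokenString, keyFunc)
+//	var expErr *TokenExpiredError
+//	if errors.As(err, &expErr) {
+//		log.Printf("token expired %v ago", expErr.ExpiredBy)
+//	}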
+
+// TokenNotValidYetError means the token failed the 'nbf' check. It's possible
+// this token will become valid once the 'nbf' time is reached. If you are
+// encountering this unexpectedly, you may want to provide a bit of leeway to
+// account for clock skew. See WithLeeway
+type TokenNotValidYetError struct {
+	At      time.Time     // The time at which the nbf was evaluated. Includes leeway.
+	EarlyBy time.Duration // How long before the token becomes valid, at time of evaluation
+	ErrorWrapper              // Value for unwrapping
+}
+
+func (e *TokenNotValidYetError) Error() string {
+	return fmt.Sprintf("token is not valid yet; wait %v", e.EarlyBy)
+}
+
+// InvalidAudienceError means the token failed the audience check.
+// Per the spec, if an 'aud' claim is present, the value must be verified.
+// See: WithAudience and WithoutAudienceValidation
+type InvalidAudienceError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidAudienceError) Error() string {
+	if e.Message == "" {
+		return "token audience is invalid"
+	}
+	return fmt.Sprintf("token audience is invalid: %v", e.Message)
+}
+
+// InvalidIssuerError means the token failed issuer validation.
+// Issuer validation is only run, by default, if the WithIssuer option is provided
+type InvalidIssuerError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidIssuerError) Error() string {
+	if e.Message == "" {
+		return "token issuer is invalid"
+	}
+	return fmt.Sprintf("token issuer is invalid: %v", e.Message)
+}
+
+// InvalidClaimsError is a catchall type for claims errors that don't have their own type
+type InvalidClaimsError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *InvalidClaimsError) Error() string {
+	if e.Message == "" {
+		return "token claim is invalid"
+	}
+	return fmt.Sprintf("token claim is invalid: %v", e.Message)
+}
+
+// SigningError is a catchall type for signing errors
+type SigningError struct {
+	Message string
+	ErrorWrapper
+}
+
+func (e *SigningError) Error() string {
+	if e.Message == "" {
+		return "error encountered during signing"
+	}
+	return fmt.Sprintf("error encountered during signing: %v", e.Message)
+}
+
+// HashUnavailableError means the requested hash function isn't available.
+// See: https://godoc.org/crypto#Hash.Available
+type HashUnavailableError struct {
+	ErrorWrapper
+}
+
+func (e *HashUnavailableError) Error() string {
+	return "the requested hash function is unavailable"
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/go.mod b/vendor/github.com/dgrijalva/jwt-go/v4/go.mod
new file mode 100644
index 000000000000..284213dea7c4
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/go.mod
@@ -0,0 +1,5 @@
+module github.com/dgrijalva/jwt-go/v4
+
+go 1.12
+
+require golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/go.sum b/vendor/github.com/dgrijalva/jwt-go/v4/go.sum
new file mode 100644
index 000000000000..3ab73eafaed1
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/hmac.go b/vendor/github.com/dgrijalva/jwt-go/v4/hmac.go
new file mode 100644
index 000000000000..2c9101257e99
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/hmac.go
@@ -0,0 +1,97 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/hmac"
+	"errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC
struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +// Alg implements SigningMethod +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return NewInvalidKeyTypeError("[]byte", key) + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Sign implements the Sign method from SigningMethod +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + keyBytes, ok := key.([]byte) + if !ok { + return "", NewInvalidKeyTypeError("[]byte", key) + } + + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/keyfunc.go b/vendor/github.com/dgrijalva/jwt-go/v4/keyfunc.go new file mode 100644 index 000000000000..2f15a8fc94ea --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/keyfunc.go @@ -0,0 +1,21 @@ +package jwt + +import "fmt" + +// Keyfunc is the type passed to Parse methods to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// KnownKeyfunc is a helper for generating a Keyfunc from a known +// signing method and key. If your implementation only supports a single signing method +// and key, this is for you. 
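+//
+// A hedged sketch of typical use (HS256 with a shared secret; the package-level
+// Parse call is illustrative):
+//
+//	keyFunc := KnownKeyfunc(SigningMethodHS256, []byte("secret"))
+//	token, err := Parse(tokenString, keyFunc)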
+func KnownKeyfunc(signingMethod SigningMethod, key interface{}) Keyfunc { + return func(t *Token) (interface{}, error) { + if signingMethod.Alg() != t.Header["alg"] { + return nil, fmt.Errorf("unexpected signing method: %v, expected: %v", t.Header["alg"], signingMethod.Alg()) + } + return key, nil + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/v4/map_claims.go new file mode 100644 index 000000000000..d721c3388cf9 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/map_claims.go @@ -0,0 +1,83 @@ +package jwt + +// MapClaims is the Claims type that uses the map[string]interface{} for JSON decoding +// This is the default Claims type if you don't supply one +type MapClaims map[string]interface{} + +// VerifyAudience compares the aud claim against cmp. +func (m MapClaims) VerifyAudience(h *ValidationHelper, cmp string) error { + if aud, err := ParseClaimStrings(m["aud"]); err == nil && aud != nil { + return h.ValidateAudienceAgainst(aud, cmp) + } else if err != nil { + return &MalformedTokenError{Message: "couldn't parse 'aud' value"} + } + return nil +} + +// VerifyIssuer compares the iss claim against cmp. +func (m MapClaims) VerifyIssuer(h *ValidationHelper, cmp string) error { + iss, ok := m["iss"].(string) + if !ok { + return &InvalidIssuerError{Message: "'iss' expected but not present"} + } + return h.ValidateIssuerAgainst(iss, cmp) +} + +// Valid validates standard claims using ValidationHelper +// Validates time based claims "exp, nbf" (see: WithLeeway) +// Validates "aud" if present in claims. (see: WithAudience, WithoutAudienceValidation) +// Validates "iss" if option is provided (see: WithIssuer) +func (m MapClaims) Valid(h *ValidationHelper) error { + var vErr error + + if h == nil { + h = DefaultValidationHelper + } + + exp, err := m.LoadTimeValue("exp") + if err != nil { + return err + } + + if err = h.ValidateExpiresAt(exp); err != nil { + vErr = wrapError(err, vErr) + } + + nbf, err := m.LoadTimeValue("nbf") + if err != nil { + return err + } + + if err = h.ValidateNotBefore(nbf); err != nil { + vErr = wrapError(err, vErr) + } + + // Try to parse the 'aud' claim + if aud, err := ParseClaimStrings(m["aud"]); err == nil && aud != nil { + // If it's present and well formed, validate + if err = h.ValidateAudience(aud); err != nil { + vErr = wrapError(err, vErr) + } + } else if err != nil { + // If it's present and not well formed, return an error + return &MalformedTokenError{Message: "couldn't parse 'aud' value"} + } + + iss, _ := m["iss"].(string) + if err = h.ValidateIssuer(iss); err != nil { + vErr = wrapError(err, vErr) + } + + return vErr +} + +// LoadTimeValue extracts a *Time value from a key in m +func (m MapClaims) LoadTimeValue(key string) (*Time, error) { + value, ok := m[key] + if !ok { + // No value present in map + return nil, nil + } + + return ParseTime(value) +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/none.go b/vendor/github.com/dgrijalva/jwt-go/v4/none.go new file mode 100644 index 000000000000..a5caed37b485 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/none.go @@ -0,0 +1,54 @@ +package jwt + +// SigningMethodNone implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +// UnsafeAllowNoneSignatureType must be returned from Keyfunc in order for the +// none signing method to be allowed. 
This is intended to make it possible to use
+// this signing method, but not by accident
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+// NoneSignatureTypeDisallowedError is the error value returned when the none signing method
+// is used without UnsafeAllowNoneSignatureType
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+	SigningMethodNone = &signingMethodNone{}
+	NoneSignatureTypeDisallowedError = &InvalidSignatureError{Message: "'none' signature type is not allowed"}
+
+	RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+		return SigningMethodNone
+	})
+}
+
+func (m *signingMethodNone) Alg() string {
+	return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+	// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+	// accepting the 'none' signing method
+	if _, ok := key.(unsafeNoneMagicConstant); !ok {
+		return NoneSignatureTypeDisallowedError
+	}
+	// If the signing method is none, the signature must be an empty string
+	if signature != "" {
+		return &InvalidSignatureError{Message: "'none' signing method with non-empty signature"}
+	}
+
+	// Accept 'none' signing method.
+	return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+	if _, ok := key.(unsafeNoneMagicConstant); ok {
+		return "", nil
+	}
+	return "", NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/parser.go b/vendor/github.com/dgrijalva/jwt-go/v4/parser.go
new file mode 100644
index 000000000000..1f1a9c09e777
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/parser.go
@@ -0,0 +1,168 @@
+package jwt
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Parser is the type used to parse and validate a JWT token from a string
+type Parser struct {
+	validMethods         []string          // If populated, only these methods will be considered valid
+	useJSONNumber        bool              // Use JSON Number format in JSON decoder
+	skipClaimsValidation bool              // Skip claims validation during token parsing
+	unmarshaller         TokenUnmarshaller // Use this instead of encoding/json
+	*ValidationHelper
+}
+
+// NewParser returns a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+	p := &Parser{
+		ValidationHelper: new(ValidationHelper),
+	}
+	for _, option := range options {
+		option(p)
+	}
+	return p
+}
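+
+// A hedged usage sketch of the functional options (WithAudience and WithLeeway
+// are defined in parser_option.go; the values are illustrative):
+//
+//	p := NewParser(WithAudience("api.example.com"), WithLeeway(30*time.Second))
+//	token, err := p.Parse(tokenString, keyFunc)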
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +// ParseWithClaims is just like parse, but with the claims type specified +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.validMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.validMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, &UnverfiableTokenError{Message: fmt.Sprintf("signing method %v is invalid", alg)} + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, &UnverfiableTokenError{Message: "no Keyfunc was provided."} + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + return token, wrapError(&UnverfiableTokenError{Message: "Keyfunc returned an error"}, err) + } + + var vErr error + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr = wrapError(&InvalidSignatureError{}, err) + } + + // Validate Claims + if !p.skipClaimsValidation && vErr == nil { + if err := token.Claims.Valid(p.ValidationHelper); err != nil { + vErr = wrapError(err, vErr) + } + } + + if vErr == nil { + token.Valid = true + } + + return token, vErr +} + +// ParseUnverified is used to inspect a token without validating it +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. Or for debuggery. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, &MalformedTokenError{Message: "token contains an invalid number of segments"} + } + + token = &Token{Raw: tokenString} + + // choose unmarshaller + var unmarshaller = p.unmarshaller + if unmarshaller == nil { + unmarshaller = p.defaultUnmarshaller + } + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, &MalformedTokenError{Message: "tokenstring should not contain 'bearer '"} + } + return token, parts, wrapError(&MalformedTokenError{Message: "failed to decode token header"}, err) + } + if err = unmarshaller(CodingContext{HeaderFieldDescriptor, nil}, headerBytes, &token.Header); err != nil { + return token, parts, wrapError(&MalformedTokenError{Message: "failed to unmarshal token header"}, err) + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, wrapError(&MalformedTokenError{Message: "failed to decode token claims"}, err) + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior
+	ctx := CodingContext{ClaimsFieldDescriptor, token.Header}
+	if c, ok := token.Claims.(MapClaims); ok {
+		err = unmarshaller(ctx, claimBytes, &c)
+	} else {
+		err = unmarshaller(ctx, claimBytes, &claims)
+	}
+	// Handle decode error
+	if err != nil {
+		return token, parts, wrapError(&MalformedTokenError{Message: "failed to unmarshal token claims"}, err)
+	}
+
+	// Lookup signature method
+	if method, ok := token.Header["alg"].(string); ok {
+		if token.Method = GetSigningMethod(method); token.Method == nil {
+			return token, parts, &UnverfiableTokenError{Message: "signing method (alg) is unavailable."}
+		}
+	} else {
+		return token, parts, &UnverfiableTokenError{Message: "signing method (alg) is unspecified."}
+	}
+
+	return token, parts, nil
+}
+
+func (p *Parser) defaultUnmarshaller(ctx CodingContext, data []byte, v interface{}) error {
+	// If we don't need a special parser, use Unmarshal
+	// We never use a special encoder for the header
+	if !p.useJSONNumber || ctx.FieldDescriptor == HeaderFieldDescriptor {
+		return json.Unmarshal(data, v)
+	}
+
+	// To enable the JSONNumber mode, we must use Decoder instead of Unmarshal
+	dec := json.NewDecoder(bytes.NewBuffer(data))
+	dec.UseNumber()
+	return dec.Decode(v)
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/parser_option.go b/vendor/github.com/dgrijalva/jwt-go/v4/parser_option.go
new file mode 100644
index 000000000000..fd285ebe9727
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/parser_option.go
@@ -0,0 +1,74 @@
+package jwt
+
+import "time"
+
+// ParserOption implements functional options for parser behavior
+// see: https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type ParserOption func(*Parser)
+
+// WithValidMethods returns the ParserOption for specifying valid signing methods
+func WithValidMethods(valid []string) ParserOption {
+	return func(p *Parser) {
+		p.validMethods = valid
+	}
+}
+
+// WithJSONNumber returns the ParserOption for using json.Number instead of float64 when parsing
+// numeric values. Used most commonly with MapClaims, but it can be useful in some cases with
+// structured claims types
+func WithJSONNumber() ParserOption {
+	return func(p *Parser) {
+		p.useJSONNumber = true
+	}
+}
+
+// WithoutClaimsValidation returns the ParserOption for disabling claims validation.
+// This does not disable signature validation. Use this if you intend to implement
+// claims validation via other means
+func WithoutClaimsValidation() ParserOption {
+	return func(p *Parser) {
+		p.skipClaimsValidation = true
+	}
+}
+
+// WithLeeway returns the ParserOption for specifying the leeway window.
+func WithLeeway(d time.Duration) ParserOption {
+	return func(p *Parser) {
+		p.ValidationHelper.leeway = d
+	}
+}
+
+// WithAudience returns the ParserOption for specifying an expected aud member value
+func WithAudience(aud string) ParserOption {
+	return func(p *Parser) {
+		p.ValidationHelper.audience = &aud
+	}
+}
+
+// WithoutAudienceValidation returns the ParserOption that specifies audience check should be skipped
+func WithoutAudienceValidation() ParserOption {
+	return func(p *Parser) {
+		p.ValidationHelper.skipAudience = true
+	}
+}
+
+// WithIssuer returns the ParserOption that specifies a value to compare against the iss claim
+func WithIssuer(iss string) ParserOption {
+	return func(p *Parser) {
+		p.ValidationHelper.issuer = &iss
+	}
+}
+
+// TokenUnmarshaller is the function signature required to supply custom JSON decoding logic.
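Taken together, a typical construction looks something like the following sketch. The token string, secret, and claim values are illustrative, and SigningMethodHS256 is assumed to be registered by the package's HMAC file, which is outside this excerpt.

package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go/v4"
)

func main() {
	// KnownKeyfunc (shown earlier) pins the expected alg before the key is used.
	keyFunc := jwt.KnownKeyfunc(jwt.SigningMethodHS256, []byte("hypothetical-secret"))

	p := jwt.NewParser(
		jwt.WithValidMethods([]string{"HS256"}),     // reject any other alg up front
		jwt.WithLeeway(30*time.Second),              // tolerate modest clock skew on exp/nbf
		jwt.WithAudience("https://api.example.com"), // aud must contain this value
		jwt.WithIssuer("https://issuer.example.com"),
	)

	if _, err := p.Parse("<token string elided>", keyFunc); err != nil {
		fmt.Println("rejected:", err)
	}
}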
+// It is the same as json.Marshal with the addition of the FieldDescriptor.
+// The field value will let your marshaller know which field is being processed.
+// This is to facilitate things like compression, where you wouldn't want to compress
+// the head.
+type TokenUnmarshaller func(ctx CodingContext, data []byte, v interface{}) error
+
+// WithUnmarshaller returns the ParserOption that replaces the specified decoder
+func WithUnmarshaller(um TokenUnmarshaller) ParserOption {
+	return func(p *Parser) {
+		p.unmarshaller = um
+	}
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/rsa.go b/vendor/github.com/dgrijalva/jwt-go/v4/rsa.go
new file mode 100644
index 000000000000..72ba2d6632b3
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/v4/rsa.go
@@ -0,0 +1,115 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"fmt"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+	SigningMethodRS256 *SigningMethodRSA
+	SigningMethodRS384 *SigningMethodRSA
+	SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+	// RS256
+	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+		return SigningMethodRS256
+	})
+
+	// RS384
+	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+		return SigningMethodRS384
+	})
+
+	// RS512
+	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+		return SigningMethodRS512
+	})
+}
+
+// Alg implements the Alg method from SigningMethod
+func (m *SigningMethodRSA) Alg() string {
+	return m.Name
+}
+
+// Verify implements the Verify method from SigningMethod
+// For this signing method, the key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	case crypto.Signer:
+		pub := k.Public()
+		if rsaKey, ok = pub.(*rsa.PublicKey); !ok {
+			return &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", pub)}
+		}
+	default:
+		return NewInvalidKeyTypeError("*rsa.PublicKey or crypto.Signer", key)
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements the Sign method from SigningMethod
+// For this signing method, the key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var signer crypto.Signer + var ok bool + + if signer, ok = key.(crypto.Signer); !ok { + return "", NewInvalidKeyTypeError("*rsa.PublicKey or crypto.Signer", key) + } + + //sanity check that the signer is an rsa signer + if pub, ok := signer.Public().(*rsa.PublicKey); !ok { + return "", &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", pub)} + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + sigBytes, err := signer.Sign(rand.Reader, hasher.Sum(nil), m.Hash) + if err != nil { + return "", err + } + return EncodeSegment(sigBytes), nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/v4/rsa_pss.go new file mode 100644 index 000000000000..aa3ba39973a1 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/rsa_pss.go @@ -0,0 +1,138 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "fmt" +) + +// SigningMethodRSAPSS implements the RSAPSS family of signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Verify implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + case crypto.Signer: + pub := k.Public() + if rsaKey, ok = pub.(*rsa.PublicKey); !ok { + return &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", pub)} + } + default: + return NewInvalidKeyTypeError("*rsa.PublicKey or crypto.Signer", key) + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Sign implements the Sign method from SigningMethod +// For this signing method, key must 
be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var signer crypto.Signer + var ok bool + + if signer, ok = key.(crypto.Signer); !ok { + return "", NewInvalidKeyTypeError("*rsa.PrivateKey or crypto.Signer", key) + } + + //sanity check that the signer is an rsa signer + if pub, ok := signer.Public().(*rsa.PublicKey); !ok { + return "", &InvalidKeyError{Message: fmt.Sprintf("signer returned unexpected public key type: %T", pub)} + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + sigBytes, err := signer.Sign(rand.Reader, hasher.Sum(nil), m.Options) + if err != nil { + return "", err + } + return EncodeSegment(sigBytes), nil + +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/v4/rsa_utils.go new file mode 100644 index 000000000000..b0dae4bfb33c --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/rsa_utils.go @@ -0,0 +1,105 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +// Errors returned by RSA Signing Method and helpers +var ( + ErrKeyMustBePEMEncoded = errors.New("invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") +) + +// ParseRSAPrivateKeyFromPEM is a helper method for +// parsing PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPrivateKeyFromPEMWithPassword is a helper method for +// parsing PEM encoded PKCS1 or PKCS8 private key, encrypted with a password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPublicKeyFromPEM is a helper method for +// parsing a PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = 
x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/v4/signing_method.go new file mode 100644 index 000000000000..5e50243ef3cf --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/signing_method.go @@ -0,0 +1,37 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// SigningMethod is the interface used for signing and verifying tokens +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// RegisterSigningMethod stores the "alg" name and a factory function pair +// used internally for looking up a signing method based on "alg". +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// GetSigningMethod returns the signing method registered by RegisterSigningMethod +// This is used by the library internally during parsing and validation. +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/signing_option.go b/vendor/github.com/dgrijalva/jwt-go/v4/signing_option.go new file mode 100644 index 000000000000..cecead297082 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/signing_option.go @@ -0,0 +1,38 @@ +package jwt + +// CodingContext provides context to TokenMarshaller and TokenUnmarshaller +type CodingContext struct { + FieldDescriptor // Which field are we encoding/decoding? + Header map[string]interface{} // The token Header, if available +} + +// FieldDescriptor describes which field is being processed. Used by CodingContext +// This is to enable the marshaller to treat the head and body differently +type FieldDescriptor uint8 + +// Constants describe which field is being processed by custom Marshaller +const ( + HeaderFieldDescriptor FieldDescriptor = 0 + ClaimsFieldDescriptor FieldDescriptor = 1 +) + +// SigningOption can be passed to signing related methods on Token to customize behavior +type SigningOption func(*signingOptions) + +type signingOptions struct { + marshaller TokenMarshaller +} + +// TokenMarshaller is the interface you must implement to provide custom JSON marshalling +// behavior. It is the same as json.Marshal with the addition of the FieldDescriptor. +// The field value will let your marshaller know which field is being processed. +// This is to facilitate things like compression, where you wouldn't want to compress +// the head. 
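The RSA pieces above compose naturally; here is a self-contained sketch of an RS256 round trip. The key is generated in-process purely for illustration (real callers would load one, for example via ParseRSAPrivateKeyFromPEM), and the signing string is a stand-in for an encoded header.claims pair.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go/v4"
)

func main() {
	// *rsa.PrivateKey satisfies crypto.Signer, which is what Sign accepts.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	sig, err := jwt.SigningMethodRS256.Sign("header.claims", priv)
	if err != nil {
		panic(err)
	}

	// Verify takes the corresponding *rsa.PublicKey (or a crypto.Signer).
	err = jwt.SigningMethodRS256.Verify("header.claims", sig, &priv.PublicKey)
	fmt.Println(err) // <nil> on success
}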
+type TokenMarshaller func(ctx CodingContext, v interface{}) ([]byte, error) + +// WithMarshaller returns a SigningOption that will tell the signing code to use your custom Marshaller +func WithMarshaller(m TokenMarshaller) SigningOption { + return func(o *signingOptions) { + o.marshaller = m + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/time.go b/vendor/github.com/dgrijalva/jwt-go/v4/time.go new file mode 100644 index 000000000000..aee72aeeeb7b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/time.go @@ -0,0 +1,78 @@ +package jwt + +import ( + "encoding/json" + "reflect" + "time" +) + +// TimePrecision determines how precisely time is measured +// by this library. When serializing and deserialzing tokens, +// time values are automatically truncated to this precision. +// See the time package's Truncate method for more detail +const TimePrecision = time.Microsecond + +// Time is how this library represents time values. It's mostly +// a wrapper for the standard library's time.Time, but adds +// specialized JSON decoding behavior to interop with the way +// time is represented by JWT. Also makes it possible to represent +// nil values. +type Time struct { + time.Time +} + +// NewTime creates a new Time value from a float64, following +// the JWT spec. +func NewTime(t float64) *Time { + return At(time.Unix(0, int64(t*float64(time.Second)))) +} + +// Now returns a new Time value using the current time. +// You can override Now by changing the value of TimeFunc +func Now() *Time { + return At(TimeFunc()) +} + +// At makes a Time value from a standard library time.Time value +func At(at time.Time) *Time { + return &Time{at.Truncate(TimePrecision)} +} + +// ParseTime is used for creating a Time value from various +// possible representations that can occur in serialization. +func ParseTime(value interface{}) (*Time, error) { + switch v := value.(type) { + case int64: + return NewTime(float64(v)), nil + case float64: + return NewTime(v), nil + case json.Number: + vv, err := v.Float64() + if err != nil { + return nil, err + } + return NewTime(vv), nil + case nil: + return nil, nil + default: + return nil, &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + } +} + +// UnmarshalJSON implements the json package's Unmarshaler interface +func (t *Time) UnmarshalJSON(data []byte) error { + var value json.Number + err := json.Unmarshal(data, &value) + if err != nil { + return err + } + v, err := ParseTime(value) + *t = *v + return err +} + +// MarshalJSON implements the json package's Marshaler interface +func (t *Time) MarshalJSON() ([]byte, error) { + f := float64(t.Truncate(TimePrecision).UnixNano()) / float64(time.Second) + return json.Marshal(f) +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/token.go b/vendor/github.com/dgrijalva/jwt-go/v4/token.go new file mode 100644 index 000000000000..5ab1eb55346a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/token.go @@ -0,0 +1,110 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Token represents JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. 
Populated when you Parse a token
+	Method SigningMethod // The signing method used or to be used
+	Header map[string]interface{} // The first segment of the token
+	Claims Claims // The second segment of the token
+	Signature string // The third segment of the token. Populated when you Parse a token
+	Valid bool // Is the token valid? Populated when you Parse/Verify a token
+}
+
+// New creates a new Token. Takes a signing method. Uses the default claims type, MapClaims.
+func New(method SigningMethod) *Token {
+	return NewWithClaims(method, MapClaims{})
+}
+
+// NewWithClaims creates a new token with a specified signing method and claims type
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+	return &Token{
+		Header: map[string]interface{}{
+			"typ": "JWT",
+			"alg": method.Alg(),
+		},
+		Claims: claims,
+		Method: method,
+	}
+}
+
+// SignedString returns the complete, signed token
+func (t *Token) SignedString(key interface{}, opts ...SigningOption) (string, error) {
+	var sig, sstr string
+	var err error
+	if sstr, err = t.SigningString(opts...); err != nil {
+		return "", err
+	}
+	if sig, err = t.Method.Sign(sstr, key); err != nil {
+		return "", err
+	}
+	return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// SigningString generates the signing string. This is the
+// most expensive part of the whole deal. Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString(opts ...SigningOption) (string, error) {
+	// Process options
+	var cfg = new(signingOptions)
+	for _, opt := range opts {
+		opt(cfg)
+	}
+	// Setup default marshaller
+	if cfg.marshaller == nil {
+		cfg.marshaller = t.defaultMarshaller
+	}
+
+	// Encode the two parts, then combine
+	inputParts := []interface{}{t.Header, t.Claims}
+	parts := make([]string, 2)
+	for i, v := range inputParts {
+		ctx := CodingContext{FieldDescriptor(i), t.Header}
+		jsonValue, err := cfg.marshaller(ctx, v)
+		if err != nil {
+			return "", err
+		}
+		parts[i] = EncodeSegment(jsonValue)
+	}
+	return strings.Join(parts, "."), nil
+}
+
+func (t *Token) defaultMarshaller(ctx CodingContext, v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Parse parses, validates, and returns a token.
+// keyFunc will receive the parsed token and should return the key for validating.
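Putting the pieces together on the issuing side, a minimal sketch follows; HS256 and the secret are illustrative, since the HMAC file is outside this excerpt. Note that the exp claim is a *Time from time.go, which serializes as fractional seconds truncated to TimePrecision.

package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go/v4"
)

func main() {
	claims := jwt.MapClaims{
		"sub": "user-123",
		"iss": "https://issuer.example.com",
		"exp": jwt.At(time.Now().Add(time.Hour)), // *jwt.Time, truncated to TimePrecision
	}

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)

	signed, err := token.SignedString([]byte("hypothetical-secret"))
	if err != nil {
		panic(err)
	}
	fmt.Println(signed) // header.claims.signature
}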
+// If everything is kosher, err will be nil +// Claims type will be the default, MapClaims +func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).Parse(tokenString, keyFunc) +} + +// ParseWithClaims is Parse, but with a specified Claims type +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) +} + +// EncodeSegment is used internally for JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return base64.RawURLEncoding.EncodeToString(seg) +} + +// DecodeSegment is used internally for JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + return base64.RawURLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/dgrijalva/jwt-go/v4/validation_helper.go b/vendor/github.com/dgrijalva/jwt-go/v4/validation_helper.go new file mode 100644 index 000000000000..2edd0a464768 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/v4/validation_helper.go @@ -0,0 +1,151 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// DefaultValidationHelper is used by Claims.Valid if none is provided +var DefaultValidationHelper = &ValidationHelper{} + +// ValidationHelper is built by the parser and passed +// to Claims.Value to carry parse/validation options +// This standalone type exists to allow implementations to do whatever custom +// behavior is required while still being able to call upon the standard behavior +// as necessary. +type ValidationHelper struct { + nowFunc func() time.Time // Override for time.Now. Mostly used for testing + leeway time.Duration // Leeway to provide when validating time values + audience *string // Expected audience value + skipAudience bool // Ignore aud check + issuer *string // Expected issuer value. ignored if nil +} + +// NewValidationHelper creates a validation helper from a list of parser options +// Not all parser options will impact validation +// If you already have a custom parser, you can use its ValidationHelper value +// instead of creating a new one +func NewValidationHelper(options ...ParserOption) *ValidationHelper { + p := NewParser(options...) + return p.ValidationHelper +} + +func (h *ValidationHelper) now() time.Time { + if h.nowFunc != nil { + return h.nowFunc() + } + return TimeFunc() +} + +// Before returns true if Now is before t +// Takes leeway into account +func (h *ValidationHelper) Before(t time.Time) bool { + return h.now().Before(t.Add(-h.leeway)) +} + +// After returns true if Now is after t +// Takes leeway into account +func (h *ValidationHelper) After(t time.Time) bool { + return h.now().After(t.Add(h.leeway)) +} + +// ValidateExpiresAt returns an error if the expiration time is invalid +// Takes leeway into account +func (h *ValidationHelper) ValidateExpiresAt(exp *Time) error { + // 'exp' claim is not set. ignore. + if exp == nil { + return nil + } + + // Expiration has passed + if h.After(exp.Time) { + delta := h.now().Sub(exp.Time) + return &TokenExpiredError{At: h.now(), ExpiredBy: delta} + } + + // Expiration has not passed + return nil +} + +// ValidateNotBefore returns an error if the nbf time has not been reached +// Takes leeway into account +func (h *ValidationHelper) ValidateNotBefore(nbf *Time) error { + // 'nbf' claim is not set. ignore. 
+ if nbf == nil { + return nil + } + + // Nbf hasn't been reached + if h.Before(nbf.Time) { + delta := nbf.Time.Sub(h.now()) + return &TokenNotValidYetError{At: h.now(), EarlyBy: delta} + } + // Nbf has been reached. valid. + return nil +} + +// ValidateAudience verifies that aud contains the audience value provided +// by the WithAudience option. +// Per the spec (https://tools.ietf.org/html/rfc7519#section-4.1.3), if the aud +// claim is present, +func (h *ValidationHelper) ValidateAudience(aud ClaimStrings) error { + // Skip flag + if h.skipAudience { + return nil + } + + // If there's no audience claim, ignore + if aud == nil || len(aud) == 0 { + return nil + } + + // If there is an audience claim, but no value provided, fail + if h.audience == nil { + return &InvalidAudienceError{Message: "audience value was expected but not provided"} + } + + return h.ValidateAudienceAgainst(aud, *h.audience) +} + +// ValidateAudienceAgainst checks that the compare value is included in the aud list +// It is used by ValidateAudience, but exposed as a helper for other implementations +func (h *ValidationHelper) ValidateAudienceAgainst(aud ClaimStrings, compare string) error { + if aud == nil { + return nil + } + + // Compare provided value with aud claim. + // This code avoids the early return to make this check more or less constant time. + // I'm not certain that's actually required in this context. + var match = false + for _, audStr := range aud { + if subtle.ConstantTimeCompare([]byte(audStr), []byte(compare)) == 1 { + match = true + } + } + if !match { + return &InvalidAudienceError{Message: fmt.Sprintf("'%v' wasn't found in aud claim", compare)} + } + return nil + +} + +// ValidateIssuer checks the claim value against the value provided by WithIssuer +func (h *ValidationHelper) ValidateIssuer(iss string) error { + // Always passes validation if issuer is not provided + if h.issuer == nil { + return nil + } + + return h.ValidateIssuerAgainst(iss, *h.issuer) +} + +// ValidateIssuerAgainst checks the claim value against the value provided, ignoring the WithIssuer value +func (h *ValidationHelper) ValidateIssuerAgainst(iss string, compare string) error { + if subtle.ConstantTimeCompare([]byte(iss), []byte(compare)) == 1 { + return nil + } + + return &InvalidIssuerError{Message: "'iss' value doesn't match expectation"} +} diff --git a/vendor/github.com/google/flatbuffers/LICENSE.txt b/vendor/github.com/google/flatbuffers/LICENSE.txt new file mode 100644 index 000000000000..a4c5efd822fb --- /dev/null +++ b/vendor/github.com/google/flatbuffers/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
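The ValidationHelper above is deliberately reusable from custom Claims implementations. A sketch, assuming the Claims interface is the Valid(*ValidationHelper) error method seen on MapClaims, and with OrderClaims as a hypothetical claims type:

package main

import (
	"time"

	jwt "github.com/dgrijalva/jwt-go/v4"
)

// OrderClaims is a hypothetical custom claims type.
type OrderClaims struct {
	OrderID string
	Expires *jwt.Time
}

// Valid delegates the exp check to the helper rather than
// reimplementing leeway handling by hand.
func (c OrderClaims) Valid(h *jwt.ValidationHelper) error {
	if h == nil {
		h = jwt.NewValidationHelper(jwt.WithLeeway(time.Minute))
	}
	return h.ValidateExpiresAt(c.Expires)
}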
diff --git a/vendor/github.com/google/flatbuffers/go/BUILD.bazel b/vendor/github.com/google/flatbuffers/go/BUILD.bazel new file mode 100644 index 000000000000..026e89c19305 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go", + srcs = [ + "builder.go", + "doc.go", + "encode.go", + "grpc.go", + "lib.go", + "sizes.go", + "struct.go", + "table.go", + ], + importpath = "github.com/google/flatbuffers/go", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/flatbuffers/go/builder.go b/vendor/github.com/google/flatbuffers/go/builder.go new file mode 100644 index 000000000000..8d75700ce24d --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/builder.go @@ -0,0 +1,771 @@ +package flatbuffers + +// Builder is a state machine for creating FlatBuffer objects. +// Use a Builder to construct object(s) starting from leaf nodes. +// +// A Builder constructs byte buffers in a last-first manner for simplicity and +// performance. +type Builder struct { + // `Bytes` gives raw access to the buffer. Most users will want to use + // FinishedBytes() instead. + Bytes []byte + + minalign int + vtable []UOffsetT + objectEnd UOffsetT + vtables []UOffsetT + head UOffsetT + nested bool + finished bool +} + +const fileIdentifierLength = 4 + +// NewBuilder initializes a Builder of size `initial_size`. +// The internal buffer is grown as needed. +func NewBuilder(initialSize int) *Builder { + if initialSize <= 0 { + initialSize = 0 + } + + b := &Builder{} + b.Bytes = make([]byte, initialSize) + b.head = UOffsetT(initialSize) + b.minalign = 1 + b.vtables = make([]UOffsetT, 0, 16) // sensible default capacity + + return b +} + +// Reset truncates the underlying Builder buffer, facilitating alloc-free +// reuse of a Builder. It also resets bookkeeping data. +func (b *Builder) Reset() { + if b.Bytes != nil { + b.Bytes = b.Bytes[:cap(b.Bytes)] + } + + if b.vtables != nil { + b.vtables = b.vtables[:0] + } + + if b.vtable != nil { + b.vtable = b.vtable[:0] + } + + b.head = UOffsetT(len(b.Bytes)) + b.minalign = 1 + b.nested = false + b.finished = false +} + +// FinishedBytes returns a pointer to the written data in the byte buffer. +// Panics if the builder is not in a finished state (which is caused by calling +// `Finish()`). +func (b *Builder) FinishedBytes() []byte { + b.assertFinished() + return b.Bytes[b.Head():] +} + +// StartObject initializes bookkeeping for writing a new object. +func (b *Builder) StartObject(numfields int) { + b.assertNotNested() + b.nested = true + + // use 32-bit offsets so that arithmetic doesn't overflow. + if cap(b.vtable) < numfields || b.vtable == nil { + b.vtable = make([]UOffsetT, numfields) + } else { + b.vtable = b.vtable[:numfields] + for i := 0; i < len(b.vtable); i++ { + b.vtable[i] = 0 + } + } + + b.objectEnd = b.Offset() +} + +// WriteVtable serializes the vtable for the current object, if applicable. +// +// Before writing out the vtable, this checks pre-existing vtables for equality +// to this one. If an equal vtable is found, point the object to the existing +// vtable and return. +// +// Because vtable values are sensitive to alignment of object data, not all +// logically-equal vtables will be deduplicated. +// +// A vtable has the following format: +// +// +// * N, where N is the number of fields in +// the schema for this type. Includes deprecated fields. 
+// Thus, a vtable is made of 2 + N elements, each SizeVOffsetT bytes wide. +// +// An object has the following format: +// +// + +func (b *Builder) WriteVtable() (n UOffsetT) { + // Prepend a zero scalar to the object. Later in this function we'll + // write an offset here that points to the object's vtable: + b.PrependSOffsetT(0) + + objectOffset := b.Offset() + existingVtable := UOffsetT(0) + + // Trim vtable of trailing zeroes. + i := len(b.vtable) - 1 + for ; i >= 0 && b.vtable[i] == 0; i-- { + } + b.vtable = b.vtable[:i+1] + + // Search backwards through existing vtables, because similar vtables + // are likely to have been recently appended. See + // BenchmarkVtableDeduplication for a case in which this heuristic + // saves about 30% of the time used in writing objects with duplicate + // tables. + for i := len(b.vtables) - 1; i >= 0; i-- { + // Find the other vtable, which is associated with `i`: + vt2Offset := b.vtables[i] + vt2Start := len(b.Bytes) - int(vt2Offset) + vt2Len := GetVOffsetT(b.Bytes[vt2Start:]) + + metadata := VtableMetadataFields * SizeVOffsetT + vt2End := vt2Start + int(vt2Len) + vt2 := b.Bytes[vt2Start+metadata : vt2End] + + // Compare the other vtable to the one under consideration. + // If they are equal, store the offset and break: + if vtableEqual(b.vtable, objectOffset, vt2) { + existingVtable = vt2Offset + break + } + } + + if existingVtable == 0 { + // Did not find a vtable, so write this one to the buffer. + + // Write out the current vtable in reverse , because + // serialization occurs in last-first order: + for i := len(b.vtable) - 1; i >= 0; i-- { + var off UOffsetT + if b.vtable[i] != 0 { + // Forward reference to field; + // use 32bit number to assert no overflow: + off = objectOffset - b.vtable[i] + } + + b.PrependVOffsetT(VOffsetT(off)) + } + + // The two metadata fields are written last. + + // First, store the object bytesize: + objectSize := objectOffset - b.objectEnd + b.PrependVOffsetT(VOffsetT(objectSize)) + + // Second, store the vtable bytesize: + vBytes := (len(b.vtable) + VtableMetadataFields) * SizeVOffsetT + b.PrependVOffsetT(VOffsetT(vBytes)) + + // Next, write the offset to the new vtable in the + // already-allocated SOffsetT at the beginning of this object: + objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset) + WriteSOffsetT(b.Bytes[objectStart:], + SOffsetT(b.Offset())-SOffsetT(objectOffset)) + + // Finally, store this vtable in memory for future + // deduplication: + b.vtables = append(b.vtables, b.Offset()) + } else { + // Found a duplicate vtable. + + objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset) + b.head = UOffsetT(objectStart) + + // Write the offset to the found vtable in the + // already-allocated SOffsetT at the beginning of this object: + WriteSOffsetT(b.Bytes[b.head:], + SOffsetT(existingVtable)-SOffsetT(objectOffset)) + } + + b.vtable = b.vtable[:0] + return objectOffset +} + +// EndObject writes data necessary to finish object construction. +func (b *Builder) EndObject() UOffsetT { + b.assertNested() + n := b.WriteVtable() + b.nested = false + return n +} + +// Doubles the size of the byteslice, and copies the old data towards the +// end of the new byteslice (since we build the buffer backwards). 
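A tiny illustration of that back-to-front invariant: Head() walks toward zero as bytes are placed, while Offset() grows from the buffer's end.

package main

import (
	"fmt"

	flatbuffers "github.com/google/flatbuffers/go"
)

func main() {
	b := flatbuffers.NewBuilder(16)
	fmt.Println(b.Head(), b.Offset()) // 16 0
	b.PlaceByte(0xFF)                 // written at Bytes[15]
	fmt.Println(b.Head(), b.Offset()) // 15 1
}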
+func (b *Builder) growByteBuffer() { + if (int64(len(b.Bytes)) & int64(0xC0000000)) != 0 { + panic("cannot grow buffer beyond 2 gigabytes") + } + newLen := len(b.Bytes) * 2 + if newLen == 0 { + newLen = 1 + } + + if cap(b.Bytes) >= newLen { + b.Bytes = b.Bytes[:newLen] + } else { + extension := make([]byte, newLen-len(b.Bytes)) + b.Bytes = append(b.Bytes, extension...) + } + + middle := newLen / 2 + copy(b.Bytes[middle:], b.Bytes[:middle]) +} + +// Head gives the start of useful data in the underlying byte buffer. +// Note: unlike other functions, this value is interpreted as from the left. +func (b *Builder) Head() UOffsetT { + return b.head +} + +// Offset relative to the end of the buffer. +func (b *Builder) Offset() UOffsetT { + return UOffsetT(len(b.Bytes)) - b.head +} + +// Pad places zeros at the current offset. +func (b *Builder) Pad(n int) { + for i := 0; i < n; i++ { + b.PlaceByte(0) + } +} + +// Prep prepares to write an element of `size` after `additional_bytes` +// have been written, e.g. if you write a string, you need to align such +// the int length field is aligned to SizeInt32, and the string data follows it +// directly. +// If all you need to do is align, `additionalBytes` will be 0. +func (b *Builder) Prep(size, additionalBytes int) { + // Track the biggest thing we've ever aligned to. + if size > b.minalign { + b.minalign = size + } + // Find the amount of alignment needed such that `size` is properly + // aligned after `additionalBytes`: + alignSize := (^(len(b.Bytes) - int(b.Head()) + additionalBytes)) + 1 + alignSize &= (size - 1) + + // Reallocate the buffer if needed: + for int(b.head) <= alignSize+size+additionalBytes { + oldBufSize := len(b.Bytes) + b.growByteBuffer() + b.head += UOffsetT(len(b.Bytes) - oldBufSize) + } + b.Pad(alignSize) +} + +// PrependSOffsetT prepends an SOffsetT, relative to where it will be written. +func (b *Builder) PrependSOffsetT(off SOffsetT) { + b.Prep(SizeSOffsetT, 0) // Ensure alignment is already done. + if !(UOffsetT(off) <= b.Offset()) { + panic("unreachable: off <= b.Offset()") + } + off2 := SOffsetT(b.Offset()) - off + SOffsetT(SizeSOffsetT) + b.PlaceSOffsetT(off2) +} + +// PrependUOffsetT prepends an UOffsetT, relative to where it will be written. +func (b *Builder) PrependUOffsetT(off UOffsetT) { + b.Prep(SizeUOffsetT, 0) // Ensure alignment is already done. + if !(off <= b.Offset()) { + panic("unreachable: off <= b.Offset()") + } + off2 := b.Offset() - off + UOffsetT(SizeUOffsetT) + b.PlaceUOffsetT(off2) +} + +// StartVector initializes bookkeeping for writing a new vector. +// +// A vector has the following format: +// +// +, where T is the type of elements of this vector. +func (b *Builder) StartVector(elemSize, numElems, alignment int) UOffsetT { + b.assertNotNested() + b.nested = true + b.Prep(SizeUint32, elemSize*numElems) + b.Prep(alignment, elemSize*numElems) // Just in case alignment > int. + return b.Offset() +} + +// EndVector writes data necessary to finish vector construction. +func (b *Builder) EndVector(vectorNumElems int) UOffsetT { + b.assertNested() + + // we already made space for this, so write without PrependUint32 + b.PlaceUOffsetT(UOffsetT(vectorNumElems)) + + b.nested = false + return b.Offset() +} + +// CreateString writes a null-terminated string as a vector. 
+func (b *Builder) CreateString(s string) UOffsetT { + b.assertNotNested() + b.nested = true + + b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte) + b.PlaceByte(0) + + l := UOffsetT(len(s)) + + b.head -= l + copy(b.Bytes[b.head:b.head+l], s) + + return b.EndVector(len(s)) +} + +// CreateByteString writes a byte slice as a string (null-terminated). +func (b *Builder) CreateByteString(s []byte) UOffsetT { + b.assertNotNested() + b.nested = true + + b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte) + b.PlaceByte(0) + + l := UOffsetT(len(s)) + + b.head -= l + copy(b.Bytes[b.head:b.head+l], s) + + return b.EndVector(len(s)) +} + +// CreateByteVector writes a ubyte vector +func (b *Builder) CreateByteVector(v []byte) UOffsetT { + b.assertNotNested() + b.nested = true + + b.Prep(int(SizeUOffsetT), len(v)*SizeByte) + + l := UOffsetT(len(v)) + + b.head -= l + copy(b.Bytes[b.head:b.head+l], v) + + return b.EndVector(len(v)) +} + +func (b *Builder) assertNested() { + // If you get this assert, you're in an object while trying to write + // data that belongs outside of an object. + // To fix this, write non-inline data (like vectors) before creating + // objects. + if !b.nested { + panic("Incorrect creation order: must be inside object.") + } +} + +func (b *Builder) assertNotNested() { + // If you hit this, you're trying to construct a Table/Vector/String + // during the construction of its parent table (between the MyTableBuilder + // and builder.Finish()). + // Move the creation of these sub-objects to above the MyTableBuilder to + // not get this assert. + // Ignoring this assert may appear to work in simple cases, but the reason + // it is here is that storing objects in-line may cause vtable offsets + // to not fit anymore. It also leads to vtable duplication. + if b.nested { + panic("Incorrect creation order: object must not be nested.") + } +} + +func (b *Builder) assertFinished() { + // If you get this assert, you're attempting to get access a buffer + // which hasn't been finished yet. Be sure to call builder.Finish() + // with your root table. + // If you really need to access an unfinished buffer, use the Bytes + // buffer directly. + if !b.finished { + panic("Incorrect use of FinishedBytes(): must call 'Finish' first.") + } +} + +// PrependBoolSlot prepends a bool onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependBoolSlot(o int, x, d bool) { + val := byte(0) + if x { + val = 1 + } + def := byte(0) + if d { + def = 1 + } + b.PrependByteSlot(o, val, def) +} + +// PrependByteSlot prepends a byte onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependByteSlot(o int, x, d byte) { + if x != d { + b.PrependByte(x) + b.Slot(o) + } +} + +// PrependUint8Slot prepends a uint8 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUint8Slot(o int, x, d uint8) { + if x != d { + b.PrependUint8(x) + b.Slot(o) + } +} + +// PrependUint16Slot prepends a uint16 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. 
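The Prepend*Slot family is normally driven by schema-generated code, but it can be exercised by hand; a sketch with hypothetical slot assignments:

package main

import (
	"fmt"

	flatbuffers "github.com/google/flatbuffers/go"
)

func main() {
	b := flatbuffers.NewBuilder(1024)

	// Strings and vectors must be created before the table that refers to them.
	name := b.CreateString("example")

	b.StartObject(2)                  // a table with two fields
	b.PrependUOffsetTSlot(0, name, 0) // slot 0: reference to the string
	b.PrependInt32Slot(1, 42, 0)      // slot 1: int32 with default 0, so 42 is written
	root := b.EndObject()

	b.Finish(root)
	fmt.Println(len(b.FinishedBytes()), "bytes") // finished buffer, root offset first
}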
+func (b *Builder) PrependUint16Slot(o int, x, d uint16) { + if x != d { + b.PrependUint16(x) + b.Slot(o) + } +} + +// PrependUint32Slot prepends a uint32 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUint32Slot(o int, x, d uint32) { + if x != d { + b.PrependUint32(x) + b.Slot(o) + } +} + +// PrependUint64Slot prepends a uint64 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUint64Slot(o int, x, d uint64) { + if x != d { + b.PrependUint64(x) + b.Slot(o) + } +} + +// PrependInt8Slot prepends a int8 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependInt8Slot(o int, x, d int8) { + if x != d { + b.PrependInt8(x) + b.Slot(o) + } +} + +// PrependInt16Slot prepends a int16 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependInt16Slot(o int, x, d int16) { + if x != d { + b.PrependInt16(x) + b.Slot(o) + } +} + +// PrependInt32Slot prepends a int32 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependInt32Slot(o int, x, d int32) { + if x != d { + b.PrependInt32(x) + b.Slot(o) + } +} + +// PrependInt64Slot prepends a int64 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependInt64Slot(o int, x, d int64) { + if x != d { + b.PrependInt64(x) + b.Slot(o) + } +} + +// PrependFloat32Slot prepends a float32 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependFloat32Slot(o int, x, d float32) { + if x != d { + b.PrependFloat32(x) + b.Slot(o) + } +} + +// PrependFloat64Slot prepends a float64 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependFloat64Slot(o int, x, d float64) { + if x != d { + b.PrependFloat64(x) + b.Slot(o) + } +} + +// PrependUOffsetTSlot prepends an UOffsetT onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUOffsetTSlot(o int, x, d UOffsetT) { + if x != d { + b.PrependUOffsetT(x) + b.Slot(o) + } +} + +// PrependStructSlot prepends a struct onto the object at vtable slot `o`. +// Structs are stored inline, so nothing additional is being added. +// In generated code, `d` is always 0. +func (b *Builder) PrependStructSlot(voffset int, x, d UOffsetT) { + if x != d { + b.assertNested() + if x != b.Offset() { + panic("inline data write outside of object") + } + b.Slot(voffset) + } +} + +// Slot sets the vtable key `voffset` to the current location in the buffer. +func (b *Builder) Slot(slotnum int) { + b.vtable[slotnum] = UOffsetT(b.Offset()) +} + +// FinishWithFileIdentifier finalizes a buffer, pointing to the given `rootTable`. 
+// It additionally applies the given file identifier.
+func (b *Builder) FinishWithFileIdentifier(rootTable UOffsetT, fid []byte) {
+	if fid == nil || len(fid) != fileIdentifierLength {
+		panic("incorrect file identifier length")
+	}
+	// To add a file identifier to the flatbuffer message, first prepare the
+	// alignment and reserve space for the file identifier.
+	b.Prep(b.minalign, SizeInt32+fileIdentifierLength)
+	for i := fileIdentifierLength - 1; i >= 0; i-- {
+		// place the file identifier
+		b.PlaceByte(fid[i])
+	}
+	// finish
+	b.Finish(rootTable)
+}
+
+// Finish finalizes a buffer, pointing to the given `rootTable`.
+func (b *Builder) Finish(rootTable UOffsetT) {
+	b.assertNotNested()
+	b.Prep(b.minalign, SizeUOffsetT)
+	b.PrependUOffsetT(rootTable)
+	b.finished = true
+}
+
+// vtableEqual compares an unwritten vtable to a written vtable.
+func vtableEqual(a []UOffsetT, objectStart UOffsetT, b []byte) bool {
+	if len(a)*SizeVOffsetT != len(b) {
+		return false
+	}
+
+	for i := 0; i < len(a); i++ {
+		x := GetVOffsetT(b[i*SizeVOffsetT : (i+1)*SizeVOffsetT])
+
+		// Skip vtable entries that indicate a default value.
+		if x == 0 && a[i] == 0 {
+			continue
+		}
+
+		y := SOffsetT(objectStart) - SOffsetT(a[i])
+		if SOffsetT(x) != y {
+			return false
+		}
+	}
+	return true
+}
+
+// PrependBool prepends a bool to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependBool(x bool) {
+	b.Prep(SizeBool, 0)
+	b.PlaceBool(x)
+}
+
+// PrependUint8 prepends a uint8 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint8(x uint8) {
+	b.Prep(SizeUint8, 0)
+	b.PlaceUint8(x)
+}
+
+// PrependUint16 prepends a uint16 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint16(x uint16) {
+	b.Prep(SizeUint16, 0)
+	b.PlaceUint16(x)
+}
+
+// PrependUint32 prepends a uint32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint32(x uint32) {
+	b.Prep(SizeUint32, 0)
+	b.PlaceUint32(x)
+}
+
+// PrependUint64 prepends a uint64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint64(x uint64) {
+	b.Prep(SizeUint64, 0)
+	b.PlaceUint64(x)
+}
+
+// PrependInt8 prepends a int8 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt8(x int8) {
+	b.Prep(SizeInt8, 0)
+	b.PlaceInt8(x)
+}
+
+// PrependInt16 prepends a int16 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt16(x int16) {
+	b.Prep(SizeInt16, 0)
+	b.PlaceInt16(x)
+}
+
+// PrependInt32 prepends a int32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt32(x int32) {
+	b.Prep(SizeInt32, 0)
+	b.PlaceInt32(x)
+}
+
+// PrependInt64 prepends a int64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt64(x int64) {
+	b.Prep(SizeInt64, 0)
+	b.PlaceInt64(x)
+}
+
+// PrependFloat32 prepends a float32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependFloat32(x float32) {
+	b.Prep(SizeFloat32, 0)
+	b.PlaceFloat32(x)
+}
+
+// PrependFloat64 prepends a float64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependFloat64(x float64) {
+	b.Prep(SizeFloat64, 0)
+	b.PlaceFloat64(x)
+}
+
+// PrependByte prepends a byte to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependByte(x byte) {
+	b.Prep(SizeByte, 0)
+	b.PlaceByte(x)
+}
+
+// PrependVOffsetT prepends a VOffsetT to the Builder buffer.
+// Aligns and checks for space.
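Taken together, the `Create*`, `Prepend*Slot`, and `Finish` entry points above form the whole by-hand building surface of this vendored package. The slot functions only write a field when it differs from its schema default, which is how FlatBuffers keeps tables sparse. A minimal sketch of that flow, assuming the package's `NewBuilder`, `StartObject`, and `EndObject` helpers (defined earlier in builder.go, outside this hunk):

```go
package main

import (
	"fmt"

	flatbuffers "github.com/google/flatbuffers/go"
)

func main() {
	b := flatbuffers.NewBuilder(0)

	// Non-inline data (strings, vectors) must be created before StartObject,
	// or assertNotNested will panic.
	name := b.CreateString("vault")

	// A hypothetical two-field table: slot 0 holds an int32, slot 1 a string.
	b.StartObject(2)
	b.PrependInt32Slot(0, 99, 0)      // 99 differs from the default 0, so it is written
	b.PrependUOffsetTSlot(1, name, 0) // offset to the string created above
	b.Finish(b.EndObject())

	fmt.Printf("% x\n", b.FinishedBytes())
}
```

Had the int32 matched its default, `PrependInt32Slot` would have skipped both the value and its vtable entry, and readers would fall back to the default.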
+func (b *Builder) PrependVOffsetT(x VOffsetT) { + b.Prep(SizeVOffsetT, 0) + b.PlaceVOffsetT(x) +} + +// PlaceBool prepends a bool to the Builder, without checking for space. +func (b *Builder) PlaceBool(x bool) { + b.head -= UOffsetT(SizeBool) + WriteBool(b.Bytes[b.head:], x) +} + +// PlaceUint8 prepends a uint8 to the Builder, without checking for space. +func (b *Builder) PlaceUint8(x uint8) { + b.head -= UOffsetT(SizeUint8) + WriteUint8(b.Bytes[b.head:], x) +} + +// PlaceUint16 prepends a uint16 to the Builder, without checking for space. +func (b *Builder) PlaceUint16(x uint16) { + b.head -= UOffsetT(SizeUint16) + WriteUint16(b.Bytes[b.head:], x) +} + +// PlaceUint32 prepends a uint32 to the Builder, without checking for space. +func (b *Builder) PlaceUint32(x uint32) { + b.head -= UOffsetT(SizeUint32) + WriteUint32(b.Bytes[b.head:], x) +} + +// PlaceUint64 prepends a uint64 to the Builder, without checking for space. +func (b *Builder) PlaceUint64(x uint64) { + b.head -= UOffsetT(SizeUint64) + WriteUint64(b.Bytes[b.head:], x) +} + +// PlaceInt8 prepends a int8 to the Builder, without checking for space. +func (b *Builder) PlaceInt8(x int8) { + b.head -= UOffsetT(SizeInt8) + WriteInt8(b.Bytes[b.head:], x) +} + +// PlaceInt16 prepends a int16 to the Builder, without checking for space. +func (b *Builder) PlaceInt16(x int16) { + b.head -= UOffsetT(SizeInt16) + WriteInt16(b.Bytes[b.head:], x) +} + +// PlaceInt32 prepends a int32 to the Builder, without checking for space. +func (b *Builder) PlaceInt32(x int32) { + b.head -= UOffsetT(SizeInt32) + WriteInt32(b.Bytes[b.head:], x) +} + +// PlaceInt64 prepends a int64 to the Builder, without checking for space. +func (b *Builder) PlaceInt64(x int64) { + b.head -= UOffsetT(SizeInt64) + WriteInt64(b.Bytes[b.head:], x) +} + +// PlaceFloat32 prepends a float32 to the Builder, without checking for space. +func (b *Builder) PlaceFloat32(x float32) { + b.head -= UOffsetT(SizeFloat32) + WriteFloat32(b.Bytes[b.head:], x) +} + +// PlaceFloat64 prepends a float64 to the Builder, without checking for space. +func (b *Builder) PlaceFloat64(x float64) { + b.head -= UOffsetT(SizeFloat64) + WriteFloat64(b.Bytes[b.head:], x) +} + +// PlaceByte prepends a byte to the Builder, without checking for space. +func (b *Builder) PlaceByte(x byte) { + b.head -= UOffsetT(SizeByte) + WriteByte(b.Bytes[b.head:], x) +} + +// PlaceVOffsetT prepends a VOffsetT to the Builder, without checking for space. +func (b *Builder) PlaceVOffsetT(x VOffsetT) { + b.head -= UOffsetT(SizeVOffsetT) + WriteVOffsetT(b.Bytes[b.head:], x) +} + +// PlaceSOffsetT prepends a SOffsetT to the Builder, without checking for space. +func (b *Builder) PlaceSOffsetT(x SOffsetT) { + b.head -= UOffsetT(SizeSOffsetT) + WriteSOffsetT(b.Bytes[b.head:], x) +} + +// PlaceUOffsetT prepends a UOffsetT to the Builder, without checking for space. +func (b *Builder) PlaceUOffsetT(x UOffsetT) { + b.head -= UOffsetT(SizeUOffsetT) + WriteUOffsetT(b.Bytes[b.head:], x) +} diff --git a/vendor/github.com/google/flatbuffers/go/doc.go b/vendor/github.com/google/flatbuffers/go/doc.go new file mode 100644 index 000000000000..694edc763d81 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/doc.go @@ -0,0 +1,3 @@ +// Package flatbuffers provides facilities to read and write flatbuffers +// objects. 
+package flatbuffers diff --git a/vendor/github.com/google/flatbuffers/go/encode.go b/vendor/github.com/google/flatbuffers/go/encode.go new file mode 100644 index 000000000000..72d4f3a15212 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/encode.go @@ -0,0 +1,238 @@ +package flatbuffers + +import ( + "math" +) + +type ( + // A SOffsetT stores a signed offset into arbitrary data. + SOffsetT int32 + // A UOffsetT stores an unsigned offset into vector data. + UOffsetT uint32 + // A VOffsetT stores an unsigned offset in a vtable. + VOffsetT uint16 +) + +const ( + // VtableMetadataFields is the count of metadata fields in each vtable. + VtableMetadataFields = 2 +) + +// GetByte decodes a little-endian byte from a byte slice. +func GetByte(buf []byte) byte { + return byte(GetUint8(buf)) +} + +// GetBool decodes a little-endian bool from a byte slice. +func GetBool(buf []byte) bool { + return buf[0] == 1 +} + +// GetUint8 decodes a little-endian uint8 from a byte slice. +func GetUint8(buf []byte) (n uint8) { + n = uint8(buf[0]) + return +} + +// GetUint16 decodes a little-endian uint16 from a byte slice. +func GetUint16(buf []byte) (n uint16) { + _ = buf[1] // Force one bounds check. See: golang.org/issue/14808 + n |= uint16(buf[0]) + n |= uint16(buf[1]) << 8 + return +} + +// GetUint32 decodes a little-endian uint32 from a byte slice. +func GetUint32(buf []byte) (n uint32) { + _ = buf[3] // Force one bounds check. See: golang.org/issue/14808 + n |= uint32(buf[0]) + n |= uint32(buf[1]) << 8 + n |= uint32(buf[2]) << 16 + n |= uint32(buf[3]) << 24 + return +} + +// GetUint64 decodes a little-endian uint64 from a byte slice. +func GetUint64(buf []byte) (n uint64) { + _ = buf[7] // Force one bounds check. See: golang.org/issue/14808 + n |= uint64(buf[0]) + n |= uint64(buf[1]) << 8 + n |= uint64(buf[2]) << 16 + n |= uint64(buf[3]) << 24 + n |= uint64(buf[4]) << 32 + n |= uint64(buf[5]) << 40 + n |= uint64(buf[6]) << 48 + n |= uint64(buf[7]) << 56 + return +} + +// GetInt8 decodes a little-endian int8 from a byte slice. +func GetInt8(buf []byte) (n int8) { + n = int8(buf[0]) + return +} + +// GetInt16 decodes a little-endian int16 from a byte slice. +func GetInt16(buf []byte) (n int16) { + _ = buf[1] // Force one bounds check. See: golang.org/issue/14808 + n |= int16(buf[0]) + n |= int16(buf[1]) << 8 + return +} + +// GetInt32 decodes a little-endian int32 from a byte slice. +func GetInt32(buf []byte) (n int32) { + _ = buf[3] // Force one bounds check. See: golang.org/issue/14808 + n |= int32(buf[0]) + n |= int32(buf[1]) << 8 + n |= int32(buf[2]) << 16 + n |= int32(buf[3]) << 24 + return +} + +// GetInt64 decodes a little-endian int64 from a byte slice. +func GetInt64(buf []byte) (n int64) { + _ = buf[7] // Force one bounds check. See: golang.org/issue/14808 + n |= int64(buf[0]) + n |= int64(buf[1]) << 8 + n |= int64(buf[2]) << 16 + n |= int64(buf[3]) << 24 + n |= int64(buf[4]) << 32 + n |= int64(buf[5]) << 40 + n |= int64(buf[6]) << 48 + n |= int64(buf[7]) << 56 + return +} + +// GetFloat32 decodes a little-endian float32 from a byte slice. +func GetFloat32(buf []byte) float32 { + x := GetUint32(buf) + return math.Float32frombits(x) +} + +// GetFloat64 decodes a little-endian float64 from a byte slice. +func GetFloat64(buf []byte) float64 { + x := GetUint64(buf) + return math.Float64frombits(x) +} + +// GetUOffsetT decodes a little-endian UOffsetT from a byte slice. 
+func GetUOffsetT(buf []byte) UOffsetT { + return UOffsetT(GetInt32(buf)) +} + +// GetSOffsetT decodes a little-endian SOffsetT from a byte slice. +func GetSOffsetT(buf []byte) SOffsetT { + return SOffsetT(GetInt32(buf)) +} + +// GetVOffsetT decodes a little-endian VOffsetT from a byte slice. +func GetVOffsetT(buf []byte) VOffsetT { + return VOffsetT(GetUint16(buf)) +} + +// WriteByte encodes a little-endian uint8 into a byte slice. +func WriteByte(buf []byte, n byte) { + WriteUint8(buf, uint8(n)) +} + +// WriteBool encodes a little-endian bool into a byte slice. +func WriteBool(buf []byte, b bool) { + buf[0] = 0 + if b { + buf[0] = 1 + } +} + +// WriteUint8 encodes a little-endian uint8 into a byte slice. +func WriteUint8(buf []byte, n uint8) { + buf[0] = byte(n) +} + +// WriteUint16 encodes a little-endian uint16 into a byte slice. +func WriteUint16(buf []byte, n uint16) { + _ = buf[1] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) +} + +// WriteUint32 encodes a little-endian uint32 into a byte slice. +func WriteUint32(buf []byte, n uint32) { + _ = buf[3] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) + buf[2] = byte(n >> 16) + buf[3] = byte(n >> 24) +} + +// WriteUint64 encodes a little-endian uint64 into a byte slice. +func WriteUint64(buf []byte, n uint64) { + _ = buf[7] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) + buf[2] = byte(n >> 16) + buf[3] = byte(n >> 24) + buf[4] = byte(n >> 32) + buf[5] = byte(n >> 40) + buf[6] = byte(n >> 48) + buf[7] = byte(n >> 56) +} + +// WriteInt8 encodes a little-endian int8 into a byte slice. +func WriteInt8(buf []byte, n int8) { + buf[0] = byte(n) +} + +// WriteInt16 encodes a little-endian int16 into a byte slice. +func WriteInt16(buf []byte, n int16) { + _ = buf[1] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) +} + +// WriteInt32 encodes a little-endian int32 into a byte slice. +func WriteInt32(buf []byte, n int32) { + _ = buf[3] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) + buf[2] = byte(n >> 16) + buf[3] = byte(n >> 24) +} + +// WriteInt64 encodes a little-endian int64 into a byte slice. +func WriteInt64(buf []byte, n int64) { + _ = buf[7] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) + buf[2] = byte(n >> 16) + buf[3] = byte(n >> 24) + buf[4] = byte(n >> 32) + buf[5] = byte(n >> 40) + buf[6] = byte(n >> 48) + buf[7] = byte(n >> 56) +} + +// WriteFloat32 encodes a little-endian float32 into a byte slice. +func WriteFloat32(buf []byte, n float32) { + WriteUint32(buf, math.Float32bits(n)) +} + +// WriteFloat64 encodes a little-endian float64 into a byte slice. +func WriteFloat64(buf []byte, n float64) { + WriteUint64(buf, math.Float64bits(n)) +} + +// WriteVOffsetT encodes a little-endian VOffsetT into a byte slice. +func WriteVOffsetT(buf []byte, n VOffsetT) { + WriteUint16(buf, uint16(n)) +} + +// WriteSOffsetT encodes a little-endian SOffsetT into a byte slice. +func WriteSOffsetT(buf []byte, n SOffsetT) { + WriteInt32(buf, int32(n)) +} + +// WriteUOffsetT encodes a little-endian UOffsetT into a byte slice. 
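All of the `Get*`/`Write*` pairs in encode.go are fixed-width little-endian codecs; each forces a single bounds check and then shifts bytes in or out. A tiny round-trip, using only this vendored package:

```go
package main

import (
	"fmt"

	flatbuffers "github.com/google/flatbuffers/go"
)

func main() {
	buf := make([]byte, flatbuffers.SizeUint32)

	flatbuffers.WriteUint32(buf, 0xdeadbeef)
	fmt.Printf("% x\n", buf) // ef be ad de: least-significant byte first

	// Get* reverses the shift-and-or, so the round trip is lossless.
	fmt.Println(flatbuffers.GetUint32(buf) == 0xdeadbeef) // true
}
```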
+func WriteUOffsetT(buf []byte, n UOffsetT) { + WriteUint32(buf, uint32(n)) +} diff --git a/vendor/github.com/google/flatbuffers/go/grpc.go b/vendor/github.com/google/flatbuffers/go/grpc.go new file mode 100644 index 000000000000..e7dabd3c69a0 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/grpc.go @@ -0,0 +1,23 @@ +package flatbuffers + +// Codec implements gRPC-go Codec which is used to encode and decode messages. +var Codec = "flatbuffers" + +type FlatbuffersCodec struct{} + +func (FlatbuffersCodec) Marshal(v interface{}) ([]byte, error) { + return v.(*Builder).FinishedBytes(), nil +} + +func (FlatbuffersCodec) Unmarshal(data []byte, v interface{}) error { + v.(flatbuffersInit).Init(data, GetUOffsetT(data)) + return nil +} + +func (FlatbuffersCodec) String() string { + return Codec +} + +type flatbuffersInit interface { + Init(data []byte, i UOffsetT) +} diff --git a/vendor/github.com/google/flatbuffers/go/lib.go b/vendor/github.com/google/flatbuffers/go/lib.go new file mode 100644 index 000000000000..adfce52efe52 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/lib.go @@ -0,0 +1,13 @@ +package flatbuffers + +// FlatBuffer is the interface that represents a flatbuffer. +type FlatBuffer interface { + Table() Table + Init(buf []byte, i UOffsetT) +} + +// GetRootAs is a generic helper to initialize a FlatBuffer with the provided buffer bytes and its data offset. +func GetRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) { + n := GetUOffsetT(buf[offset:]) + fb.Init(buf, n+offset) +} diff --git a/vendor/github.com/google/flatbuffers/go/sizes.go b/vendor/github.com/google/flatbuffers/go/sizes.go new file mode 100644 index 000000000000..ba221698455f --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/sizes.go @@ -0,0 +1,55 @@ +package flatbuffers + +import ( + "unsafe" +) + +const ( + // See http://golang.org/ref/spec#Numeric_types + + // SizeUint8 is the byte size of a uint8. + SizeUint8 = 1 + // SizeUint16 is the byte size of a uint16. + SizeUint16 = 2 + // SizeUint32 is the byte size of a uint32. + SizeUint32 = 4 + // SizeUint64 is the byte size of a uint64. + SizeUint64 = 8 + + // SizeInt8 is the byte size of a int8. + SizeInt8 = 1 + // SizeInt16 is the byte size of a int16. + SizeInt16 = 2 + // SizeInt32 is the byte size of a int32. + SizeInt32 = 4 + // SizeInt64 is the byte size of a int64. + SizeInt64 = 8 + + // SizeFloat32 is the byte size of a float32. + SizeFloat32 = 4 + // SizeFloat64 is the byte size of a float64. + SizeFloat64 = 8 + + // SizeByte is the byte size of a byte. + // The `byte` type is aliased (by Go definition) to uint8. + SizeByte = 1 + + // SizeBool is the byte size of a bool. + // The `bool` type is aliased (by flatbuffers convention) to uint8. + SizeBool = 1 + + // SizeSOffsetT is the byte size of an SOffsetT. + // The `SOffsetT` type is aliased (by flatbuffers convention) to int32. + SizeSOffsetT = 4 + // SizeUOffsetT is the byte size of an UOffsetT. + // The `UOffsetT` type is aliased (by flatbuffers convention) to uint32. + SizeUOffsetT = 4 + // SizeVOffsetT is the byte size of an VOffsetT. + // The `VOffsetT` type is aliased (by flatbuffers convention) to uint16. + SizeVOffsetT = 2 +) + +// byteSliceToString converts a []byte to string without a heap allocation. 
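The grpc.go file above wires a Builder straight into gRPC: `Marshal` hands the transport the finished bytes without copying, and `Unmarshal` initializes the target type in place over the received buffer. A sketch of registering the codec on a client connection, assuming the older (deprecated but still available) `grpc.CallCustomCodec` hook from google.golang.org/grpc:

```go
package main

import (
	flatbuffers "github.com/google/flatbuffers/go"
	"google.golang.org/grpc"
)

// dialWithFlatbuffers opens a client connection whose calls marshal
// requests and unmarshal responses via FlatbuffersCodec.
func dialWithFlatbuffers(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(), // sketch only; use transport credentials in real code
		grpc.WithDefaultCallOptions(grpc.CallCustomCodec(flatbuffers.FlatbuffersCodec{})),
	)
}
```

Note that `Marshal` type-asserts its argument to `*flatbuffers.Builder`, so requests must be passed as finished Builders rather than raw byte slices.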
+func byteSliceToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/google/flatbuffers/go/struct.go b/vendor/github.com/google/flatbuffers/go/struct.go new file mode 100644 index 000000000000..11258f715d46 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/struct.go @@ -0,0 +1,8 @@ +package flatbuffers + +// Struct wraps a byte slice and provides read access to its data. +// +// Structs do not have a vtable. +type Struct struct { + Table +} diff --git a/vendor/github.com/google/flatbuffers/go/table.go b/vendor/github.com/google/flatbuffers/go/table.go new file mode 100644 index 000000000000..b273146fad40 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/table.go @@ -0,0 +1,505 @@ +package flatbuffers + +// Table wraps a byte slice and provides read access to its data. +// +// The variable `Pos` indicates the root of the FlatBuffers object therein. +type Table struct { + Bytes []byte + Pos UOffsetT // Always < 1<<31. +} + +// Offset provides access into the Table's vtable. +// +// Fields which are deprecated are ignored by checking against the vtable's length. +func (t *Table) Offset(vtableOffset VOffsetT) VOffsetT { + vtable := UOffsetT(SOffsetT(t.Pos) - t.GetSOffsetT(t.Pos)) + if vtableOffset < t.GetVOffsetT(vtable) { + return t.GetVOffsetT(vtable + UOffsetT(vtableOffset)) + } + return 0 +} + +// Indirect retrieves the relative offset stored at `offset`. +func (t *Table) Indirect(off UOffsetT) UOffsetT { + return off + GetUOffsetT(t.Bytes[off:]) +} + +// String gets a string from data stored inside the flatbuffer. +func (t *Table) String(off UOffsetT) string { + b := t.ByteVector(off) + return byteSliceToString(b) +} + +// ByteVector gets a byte slice from data stored inside the flatbuffer. +func (t *Table) ByteVector(off UOffsetT) []byte { + off += GetUOffsetT(t.Bytes[off:]) + start := off + UOffsetT(SizeUOffsetT) + length := GetUOffsetT(t.Bytes[off:]) + return t.Bytes[start : start+length] +} + +// VectorLen retrieves the length of the vector whose offset is stored at +// "off" in this object. +func (t *Table) VectorLen(off UOffsetT) int { + off += t.Pos + off += GetUOffsetT(t.Bytes[off:]) + return int(GetUOffsetT(t.Bytes[off:])) +} + +// Vector retrieves the start of data of the vector whose offset is stored +// at "off" in this object. +func (t *Table) Vector(off UOffsetT) UOffsetT { + off += t.Pos + x := off + GetUOffsetT(t.Bytes[off:]) + // data starts after metadata containing the vector length + x += UOffsetT(SizeUOffsetT) + return x +} + +// Union initializes any Table-derived type to point to the union at the given +// offset. +func (t *Table) Union(t2 *Table, off UOffsetT) { + off += t.Pos + t2.Pos = off + t.GetUOffsetT(off) + t2.Bytes = t.Bytes +} + +// GetBool retrieves a bool at the given offset. +func (t *Table) GetBool(off UOffsetT) bool { + return GetBool(t.Bytes[off:]) +} + +// GetByte retrieves a byte at the given offset. +func (t *Table) GetByte(off UOffsetT) byte { + return GetByte(t.Bytes[off:]) +} + +// GetUint8 retrieves a uint8 at the given offset. +func (t *Table) GetUint8(off UOffsetT) uint8 { + return GetUint8(t.Bytes[off:]) +} + +// GetUint16 retrieves a uint16 at the given offset. +func (t *Table) GetUint16(off UOffsetT) uint16 { + return GetUint16(t.Bytes[off:]) +} + +// GetUint32 retrieves a uint32 at the given offset. +func (t *Table) GetUint32(off UOffsetT) uint32 { + return GetUint32(t.Bytes[off:]) +} + +// GetUint64 retrieves a uint64 at the given offset. 
+func (t *Table) GetUint64(off UOffsetT) uint64 { + return GetUint64(t.Bytes[off:]) +} + +// GetInt8 retrieves a int8 at the given offset. +func (t *Table) GetInt8(off UOffsetT) int8 { + return GetInt8(t.Bytes[off:]) +} + +// GetInt16 retrieves a int16 at the given offset. +func (t *Table) GetInt16(off UOffsetT) int16 { + return GetInt16(t.Bytes[off:]) +} + +// GetInt32 retrieves a int32 at the given offset. +func (t *Table) GetInt32(off UOffsetT) int32 { + return GetInt32(t.Bytes[off:]) +} + +// GetInt64 retrieves a int64 at the given offset. +func (t *Table) GetInt64(off UOffsetT) int64 { + return GetInt64(t.Bytes[off:]) +} + +// GetFloat32 retrieves a float32 at the given offset. +func (t *Table) GetFloat32(off UOffsetT) float32 { + return GetFloat32(t.Bytes[off:]) +} + +// GetFloat64 retrieves a float64 at the given offset. +func (t *Table) GetFloat64(off UOffsetT) float64 { + return GetFloat64(t.Bytes[off:]) +} + +// GetUOffsetT retrieves a UOffsetT at the given offset. +func (t *Table) GetUOffsetT(off UOffsetT) UOffsetT { + return GetUOffsetT(t.Bytes[off:]) +} + +// GetVOffsetT retrieves a VOffsetT at the given offset. +func (t *Table) GetVOffsetT(off UOffsetT) VOffsetT { + return GetVOffsetT(t.Bytes[off:]) +} + +// GetSOffsetT retrieves a SOffsetT at the given offset. +func (t *Table) GetSOffsetT(off UOffsetT) SOffsetT { + return GetSOffsetT(t.Bytes[off:]) +} + +// GetBoolSlot retrieves the bool that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetBoolSlot(slot VOffsetT, d bool) bool { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetBool(t.Pos + UOffsetT(off)) +} + +// GetByteSlot retrieves the byte that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetByteSlot(slot VOffsetT, d byte) byte { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetByte(t.Pos + UOffsetT(off)) +} + +// GetInt8Slot retrieves the int8 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetInt8Slot(slot VOffsetT, d int8) int8 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetInt8(t.Pos + UOffsetT(off)) +} + +// GetUint8Slot retrieves the uint8 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetUint8Slot(slot VOffsetT, d uint8) uint8 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetUint8(t.Pos + UOffsetT(off)) +} + +// GetInt16Slot retrieves the int16 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetInt16Slot(slot VOffsetT, d int16) int16 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetInt16(t.Pos + UOffsetT(off)) +} + +// GetUint16Slot retrieves the uint16 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetUint16Slot(slot VOffsetT, d uint16) uint16 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetUint16(t.Pos + UOffsetT(off)) +} + +// GetInt32Slot retrieves the int32 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. 
+func (t *Table) GetInt32Slot(slot VOffsetT, d int32) int32 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetInt32(t.Pos + UOffsetT(off)) +} + +// GetUint32Slot retrieves the uint32 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetUint32Slot(slot VOffsetT, d uint32) uint32 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetUint32(t.Pos + UOffsetT(off)) +} + +// GetInt64Slot retrieves the int64 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetInt64Slot(slot VOffsetT, d int64) int64 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetInt64(t.Pos + UOffsetT(off)) +} + +// GetUint64Slot retrieves the uint64 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetUint64Slot(slot VOffsetT, d uint64) uint64 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetUint64(t.Pos + UOffsetT(off)) +} + +// GetFloat32Slot retrieves the float32 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetFloat32Slot(slot VOffsetT, d float32) float32 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetFloat32(t.Pos + UOffsetT(off)) +} + +// GetFloat64Slot retrieves the float64 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetFloat64Slot(slot VOffsetT, d float64) float64 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetFloat64(t.Pos + UOffsetT(off)) +} + +// GetVOffsetTSlot retrieves the VOffsetT that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetVOffsetTSlot(slot VOffsetT, d VOffsetT) VOffsetT { + off := t.Offset(slot) + if off == 0 { + return d + } + return VOffsetT(off) +} + +// MutateBool updates a bool at the given offset. +func (t *Table) MutateBool(off UOffsetT, n bool) bool { + WriteBool(t.Bytes[off:], n) + return true +} + +// MutateByte updates a Byte at the given offset. +func (t *Table) MutateByte(off UOffsetT, n byte) bool { + WriteByte(t.Bytes[off:], n) + return true +} + +// MutateUint8 updates a Uint8 at the given offset. +func (t *Table) MutateUint8(off UOffsetT, n uint8) bool { + WriteUint8(t.Bytes[off:], n) + return true +} + +// MutateUint16 updates a Uint16 at the given offset. +func (t *Table) MutateUint16(off UOffsetT, n uint16) bool { + WriteUint16(t.Bytes[off:], n) + return true +} + +// MutateUint32 updates a Uint32 at the given offset. +func (t *Table) MutateUint32(off UOffsetT, n uint32) bool { + WriteUint32(t.Bytes[off:], n) + return true +} + +// MutateUint64 updates a Uint64 at the given offset. +func (t *Table) MutateUint64(off UOffsetT, n uint64) bool { + WriteUint64(t.Bytes[off:], n) + return true +} + +// MutateInt8 updates a Int8 at the given offset. +func (t *Table) MutateInt8(off UOffsetT, n int8) bool { + WriteInt8(t.Bytes[off:], n) + return true +} + +// MutateInt16 updates a Int16 at the given offset. +func (t *Table) MutateInt16(off UOffsetT, n int16) bool { + WriteInt16(t.Bytes[off:], n) + return true +} + +// MutateInt32 updates a Int32 at the given offset. 
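The `Get*Slot`/`Mutate*Slot` pairs resolve a field through the vtable first, so absent fields read as their defaults and cannot be mutated. A self-contained sketch: field 0 of a table sits at vtable offset 4, after the two `VtableMetadataFields`, and `NewBuilder`/`StartObject`/`EndObject` come from builder.go, outside this hunk.

```go
package main

import (
	"fmt"

	flatbuffers "github.com/google/flatbuffers/go"
)

func main() {
	// Build a one-field table whose int32 field is set to 7.
	b := flatbuffers.NewBuilder(0)
	b.StartObject(1)
	b.PrependInt32Slot(0, 7, 0)
	b.Finish(b.EndObject())

	// The root UOffsetT at byte 0 points at the table position.
	buf := b.FinishedBytes()
	tab := &flatbuffers.Table{Bytes: buf, Pos: flatbuffers.GetUOffsetT(buf)}

	fmt.Println(tab.GetInt32Slot(4, 0))    // 7: slot present, stored value wins
	fmt.Println(tab.MutateInt32Slot(4, 8)) // true: mutated in place
	fmt.Println(tab.GetInt32Slot(6, -1))   // -1: no such slot, default returned
}
```

`MutateInt32Slot(6, ...)` would likewise return false: a defaulted field has no storage to rewrite, and defaults are never materialized.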
+func (t *Table) MutateInt32(off UOffsetT, n int32) bool { + WriteInt32(t.Bytes[off:], n) + return true +} + +// MutateInt64 updates a Int64 at the given offset. +func (t *Table) MutateInt64(off UOffsetT, n int64) bool { + WriteInt64(t.Bytes[off:], n) + return true +} + +// MutateFloat32 updates a Float32 at the given offset. +func (t *Table) MutateFloat32(off UOffsetT, n float32) bool { + WriteFloat32(t.Bytes[off:], n) + return true +} + +// MutateFloat64 updates a Float64 at the given offset. +func (t *Table) MutateFloat64(off UOffsetT, n float64) bool { + WriteFloat64(t.Bytes[off:], n) + return true +} + +// MutateUOffsetT updates a UOffsetT at the given offset. +func (t *Table) MutateUOffsetT(off UOffsetT, n UOffsetT) bool { + WriteUOffsetT(t.Bytes[off:], n) + return true +} + +// MutateVOffsetT updates a VOffsetT at the given offset. +func (t *Table) MutateVOffsetT(off UOffsetT, n VOffsetT) bool { + WriteVOffsetT(t.Bytes[off:], n) + return true +} + +// MutateSOffsetT updates a SOffsetT at the given offset. +func (t *Table) MutateSOffsetT(off UOffsetT, n SOffsetT) bool { + WriteSOffsetT(t.Bytes[off:], n) + return true +} + +// MutateBoolSlot updates the bool at given vtable location +func (t *Table) MutateBoolSlot(slot VOffsetT, n bool) bool { + if off := t.Offset(slot); off != 0 { + t.MutateBool(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateByteSlot updates the byte at given vtable location +func (t *Table) MutateByteSlot(slot VOffsetT, n byte) bool { + if off := t.Offset(slot); off != 0 { + t.MutateByte(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateInt8Slot updates the int8 at given vtable location +func (t *Table) MutateInt8Slot(slot VOffsetT, n int8) bool { + if off := t.Offset(slot); off != 0 { + t.MutateInt8(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateUint8Slot updates the uint8 at given vtable location +func (t *Table) MutateUint8Slot(slot VOffsetT, n uint8) bool { + if off := t.Offset(slot); off != 0 { + t.MutateUint8(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateInt16Slot updates the int16 at given vtable location +func (t *Table) MutateInt16Slot(slot VOffsetT, n int16) bool { + if off := t.Offset(slot); off != 0 { + t.MutateInt16(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateUint16Slot updates the uint16 at given vtable location +func (t *Table) MutateUint16Slot(slot VOffsetT, n uint16) bool { + if off := t.Offset(slot); off != 0 { + t.MutateUint16(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateInt32Slot updates the int32 at given vtable location +func (t *Table) MutateInt32Slot(slot VOffsetT, n int32) bool { + if off := t.Offset(slot); off != 0 { + t.MutateInt32(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateUint32Slot updates the uint32 at given vtable location +func (t *Table) MutateUint32Slot(slot VOffsetT, n uint32) bool { + if off := t.Offset(slot); off != 0 { + t.MutateUint32(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateInt64Slot updates the int64 at given vtable location +func (t *Table) MutateInt64Slot(slot VOffsetT, n int64) bool { + if off := t.Offset(slot); off != 0 { + t.MutateInt64(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateUint64Slot updates the uint64 at given vtable location +func (t *Table) MutateUint64Slot(slot VOffsetT, n uint64) bool { + if off := t.Offset(slot); off != 0 { + 
t.MutateUint64(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateFloat32Slot updates the float32 at given vtable location +func (t *Table) MutateFloat32Slot(slot VOffsetT, n float32) bool { + if off := t.Offset(slot); off != 0 { + t.MutateFloat32(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateFloat64Slot updates the float64 at given vtable location +func (t *Table) MutateFloat64Slot(slot VOffsetT, n float64) bool { + if off := t.Offset(slot); off != 0 { + t.MutateFloat64(t.Pos+UOffsetT(off), n) + return true + } + + return false +} diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/CHANGELOG.md b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/CHANGELOG.md new file mode 100644 index 000000000000..0efef473a47f --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/CHANGELOG.md @@ -0,0 +1,5 @@ +## Next + +BUG FIXES: + +* Fixes `bound_claims` validation for provider-specific group and user info fetching [[GH-149](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/149)] diff --git a/vendor/github.com/hashicorp/vault-plugin-database-snowflake/.gitignore b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/.gitignore new file mode 100644 index 000000000000..0710efccff99 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/.gitignore @@ -0,0 +1,52 @@ +# Created by https://www.gitignore.io/api/go,macos +# Edit at https://www.gitignore.io/?templates=go,macos + +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +vault-plugin-secrets-snowflake +vault-plugin-database-snowflake +vault* + +.idea/ +# End of https://www.gitignore.io/api/go,macos +# \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault-plugin-database-snowflake/LICENSE b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/LICENSE new file mode 100644 index 000000000000..a612ad9813b0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/vault-plugin-database-snowflake/README.md b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/README.md
new file mode 100644
index 000000000000..2ae445077707
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/README.md
@@ -0,0 +1,33 @@
+# vault-plugin-database-snowflake
+A Vault plugin for Snowflake. It is one of the supported plugins for the HashiCorp Vault Database Secrets Engine and allows for the programmatic generation of unique, ephemeral Snowflake [Database User](https://docs.snowflake.com/en/user-guide/admin-user-management.html) credentials in a Snowflake account.
+
+This project uses the database plugin interface introduced in Vault version 1.6.0.
+
+**This plugin will be included in version 1.7 of Vault.**
+
+## Bugs and Feature Requests
+Bugs should be filed under the Issues section of this repo.
+
+Feature requests can be submitted in the Issues section as well.
+
+## Quick Links
+ * Database Secrets Engine for Snowflake - Docs
+ * Database Secrets Engine for Snowflake - API Docs
+ * [Snowflake Website](https://www.snowflake.com/)
+ * [Vault Website](https://www.vaultproject.io)
+
+**Please note**: HashiCorp takes Vault's security and its users' trust very seriously.
+
+If you believe you have found a security issue in Vault or with this plugin, _please responsibly disclose_ by contacting HashiCorp at [security@hashicorp.com](mailto:security@hashicorp.com).
+
+## Acceptance Testing
+To perform acceptance testing, you need to set the environment variable `VAULT_ACC=1` and provide all of the information necessary to connect to a Snowflake account. All `SNOWFLAKE_*` environment variables must be provided for the acceptance tests to run properly. A running Snowflake instance must be reachable during the test. A [30-day trial account](https://signup.snowflake.com/) can be provisioned manually to test.
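That gating convention is straightforward to mirror in Go test code. A hedged sketch follows; the `requireAcceptance` helper is illustrative rather than part of the plugin, and the full variable list is in the table below:

```go
package snowflake

import (
	"os"
	"testing"
)

// requireAcceptance skips the test unless the acceptance-test switch is on,
// then collects the required Snowflake connection variables, per the README.
func requireAcceptance(t *testing.T) map[string]string {
	t.Helper()
	if os.Getenv("VAULT_ACC") != "1" {
		t.Skip("set VAULT_ACC=1 to run acceptance tests")
	}
	env := map[string]string{}
	for _, k := range []string{"SNOWFLAKE_ACCOUNT", "SNOWFLAKE_USER", "SNOWFLAKE_PASSWORD"} {
		v := os.Getenv(k)
		if v == "" {
			t.Fatalf("%s must be set for acceptance tests", k)
		}
		env[k] = v
	}
	return env
}
```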
+ +| Environment Variable | Description | +|----------------------|-------------| +| SNOWFLAKE_ACCOUNT | The account string for your snowflake instance. If you are using a non-AWS provider, or a region that isn't us-west-1 for AWS, region and provider should be included here. (example: `ec#####.east-us-2.azure`) | +| SNOWFLAKE_USER | The accountadmin level user that you are using with Vault | +| SNOWFLAKE_PASSWORD | The password associated with the provided user | +| SNOWFLAKE_DB | optional: The DB you are restricting the connection to | +| SNOWFLAKE_SCHEMA | optional: The schema you are restricting the connection to | +| SNOWFLAKE_WAREHOUSE | optional: The warehouse you are restricting the connection to | \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault-plugin-database-snowflake/go.mod b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/go.mod new file mode 100644 index 000000000000..f58631440044 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/go.mod @@ -0,0 +1,12 @@ +module github.com/hashicorp/vault-plugin-database-snowflake + +go 1.15 + +require ( + github.com/hashicorp/errwrap v1.0.0 + github.com/hashicorp/go-multierror v1.1.0 + github.com/hashicorp/vault/sdk v0.1.14-0.20201022214319-d87657199d4b + github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/mitchellh/mapstructure v1.3.2 + github.com/snowflakedb/gosnowflake v1.3.11 +) diff --git a/vendor/github.com/hashicorp/vault-plugin-database-snowflake/go.sum b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/go.sum new file mode 100644 index 000000000000..a14d6d012c66 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/go.sum @@ -0,0 +1,368 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize 
v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= +github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= 
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE= +github.com/hashicorp/vault/sdk v0.1.14-0.20200519221530-14615acda45f/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vault/sdk v0.1.14-0.20201022214319-d87657199d4b h1:kT0HPwthAisVgxAkm/kNGI2IHm0rAco28dOs3geL90E= +github.com/hashicorp/vault/sdk v0.1.14-0.20201022214319-d87657199d4b/go.mod h1:cAGI4nVnEfAyMeqt9oB+Mase8DNn3qA/LDNHURiwssY= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= 
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus 
v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce h1:CGR1hXCOeoZ1aJhCs8qdKJuEu3xoZnxsLcYoh5Bnr+4= +github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce/go.mod h1:EB/w24pR5VKI60ecFnKqXzxX3dOorz1rnVicQTQrGM0= +github.com/snowflakedb/gosnowflake v1.3.11 h1:4VATaWPZv2HEh9bkZG5LaMux4WRiZJDu/PvvMCzrpUg= +github.com/snowflakedb/gosnowflake v1.3.11/go.mod h1:+BMe9ivHWpzcXbM1qSIxWZ8qpWGBBaA46o9Z1qSfrNg= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:vEg9joUBmeBcK9iSJftGNf3coIG4HqZElCPehJsfAYM= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= 
+google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/vault-plugin-database-snowflake/snowflake.go 
b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/snowflake.go new file mode 100644 index 000000000000..b4fb2282a19c --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-database-snowflake/snowflake.go @@ -0,0 +1,342 @@ +package snowflake + +import ( + "context" + "database/sql" + "fmt" + "math" + "strings" + "time" + + "github.com/hashicorp/errwrap" + + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/credsutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/helper/strutil" + _ "github.com/snowflakedb/gosnowflake" +) + +const ( + snowflakeSQLTypeName = "snowflake" + maxIdentifierLength = 255 + maxUsernameChunkLen = 32 // arbitrarily chosen + maxRolenameChunkLen = 32 // arbitrarily chosen + defaultSnowflakeRenewSQL = ` +alter user {{name}} set DAYS_TO_EXPIRY = {{expiration}}; +` + defaultSnowflakeRotateCredsSQL = ` +alter user {{name}} set PASSWORD = '{{password}}'; +` + defaultSnowflakeDeleteSQL = ` +drop user {{name}}; +` +) + +var ( + _ dbplugin.Database = (*SnowflakeSQL)(nil) +) + +func New() (interface{}, error) { + db := new() + // Wrap the plugin with middleware to sanitize errors + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues) + return dbType, nil +} + +func (s *SnowflakeSQL) secretValues() map[string]string { + return map[string]string{ + s.Password: "[password]", + } +} + +func new() *SnowflakeSQL { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = snowflakeSQLTypeName + + db := &SnowflakeSQL{ + SQLConnectionProducer: connProducer, + } + + return db +} + +type SnowflakeSQL struct { + *connutil.SQLConnectionProducer +} + +func (s *SnowflakeSQL) Type() (string, error) { + return snowflakeSQLTypeName, nil +} + +func (s *SnowflakeSQL) getConnection(ctx context.Context) (*sql.DB, error) { + db, err := s.Connection(ctx) + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +func (s *SnowflakeSQL) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + err := s.SQLConnectionProducer.Initialize(ctx, req.Config, req.VerifyConnection) + if err != nil { + return dbplugin.InitializeResponse{}, err + } + + resp := dbplugin.InitializeResponse{ + Config: req.Config, + } + + return resp, nil +} + +func (s *SnowflakeSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) { + s.Lock() + defer s.Unlock() + + statements := req.Statements.Commands + if len(statements) == 0 { + return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement + } + + username, err := s.generateUsername(req) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + password := req.Password + expirationStr, err := calculateExpirationString(req.Expiration) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + // Get the connection + db, err := s.getConnection(ctx) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + defer tx.Rollback() + + // Execute each query + for _, stmt := range statements { + // it's fine to split the statements on the semicolon. 
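+			// (A hedged note on the split, added for clarity: each configured
+			// statement may bundle several semicolon-separated queries, so they
+			// are run individually; any empty fragments left behind by the
+			// split are skipped by the length check below.)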
+ for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + "password": password, + "expiration": expirationStr, + } + + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return dbplugin.NewUserResponse{}, err + } + } + } + + err = tx.Commit() + resp := dbplugin.NewUserResponse{ + Username: username, + } + return resp, err +} + +func (s *SnowflakeSQL) generateUsername(req dbplugin.NewUserRequest) (string, error) { + username, err := credsutil.GenerateUsername( + credsutil.DisplayName(req.UsernameConfig.DisplayName, maxUsernameChunkLen), + credsutil.RoleName(req.UsernameConfig.RoleName, maxRolenameChunkLen), + credsutil.MaxLength(maxIdentifierLength), + ) + if err != nil { + return "", errwrap.Wrapf("error generating username: {{err}}", err) + } + return username, nil +} + +func (s *SnowflakeSQL) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { + s.Lock() + defer s.Unlock() + + if req.Username == "" { + err := fmt.Errorf("a username must be provided to update a user") + return dbplugin.UpdateUserResponse{}, err + } + + db, err := s.getConnection(ctx) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + defer tx.Rollback() + + if req.Password != nil { + err = s.updateUserPassword(ctx, tx, req.Username, req.Password) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + } + + if req.Expiration != nil { + err = s.updateUserExpiration(ctx, tx, req.Username, req.Expiration) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + } + + if err := tx.Commit(); err != nil { + return dbplugin.UpdateUserResponse{}, err + } + + return dbplugin.UpdateUserResponse{}, nil +} + +func (s *SnowflakeSQL) updateUserPassword(ctx context.Context, tx *sql.Tx, username string, req *dbplugin.ChangePassword) error { + password := req.NewPassword + + if username == "" || password == "" { + return fmt.Errorf("must provide both username and password to modify password") + } + + stmts := req.Statements.Commands + if len(stmts) == 0 { + stmts = []string{defaultSnowflakeRotateCredsSQL} + } + + for _, stmt := range stmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + "password": password, + } + + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + } + } + + return nil +} + +func (s *SnowflakeSQL) updateUserExpiration(ctx context.Context, tx *sql.Tx, username string, req *dbplugin.ChangeExpiration) error { + expiration := req.NewExpiration + + if username == "" || expiration.IsZero() { + return fmt.Errorf("must provide both username and valid expiration to modify expiration") + } + + expirationStr, err := calculateExpirationString(expiration) + if err != nil { + return err + } + + stmts := req.Statements.Commands + if len(stmts) == 0 { + stmts = []string{defaultSnowflakeRenewSQL} + } + + for _, stmt := range stmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": 
username, + "expiration": expirationStr, + } + + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + } + } + + return nil +} + +func (s *SnowflakeSQL) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + s.Lock() + defer s.Unlock() + + username := req.Username + statements := req.Statements.Commands + if len(statements) == 0 { + statements = []string{defaultSnowflakeDeleteSQL} + } + + db, err := s.getConnection(ctx) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer tx.Rollback() + + for _, stmt := range statements { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + } + if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil { + return dbplugin.DeleteUserResponse{}, err + } + } + } + + err = tx.Commit() + return dbplugin.DeleteUserResponse{}, err +} + +// calculateExpirationString has a minimum expiration of 1 Day. This +// limitation is due to Snowflake requiring any expiration to be in +// terms of days, with 1 being the minimum. +func calculateExpirationString(expiration time.Time) (string, error) { + currentTime := time.Now() + + if currentTime.Before(expiration) { + timeDiff := expiration.Sub(currentTime) + inSeconds := timeDiff.Seconds() + inDays := math.Max(math.Floor(inSeconds/float64(60*60*24)), 1) + + expirationStr := fmt.Sprintf("%d", int(inDays)) + return expirationStr, nil + } else { + err := fmt.Errorf("expiration time earlier than current time") + return "", err + } +} diff --git a/vendor/github.com/pkg/browser/LICENSE b/vendor/github.com/pkg/browser/LICENSE new file mode 100644 index 000000000000..65f78fb62910 --- /dev/null +++ b/vendor/github.com/pkg/browser/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/browser/README.md b/vendor/github.com/pkg/browser/README.md new file mode 100644 index 000000000000..72b1976e3035 --- /dev/null +++ b/vendor/github.com/pkg/browser/README.md @@ -0,0 +1,55 @@ + +# browser + import "github.com/pkg/browser" + +Package browser provides helpers to open files, readers, and urls in a browser window. + +The choice of which browser is started is entirely client dependant. + + + + + +## Variables +``` go +var Stderr io.Writer = os.Stderr +``` +Stderr is the io.Writer to which executed commands write standard error. + +``` go +var Stdout io.Writer = os.Stdout +``` +Stdout is the io.Writer to which executed commands write standard output. + + +## func OpenFile +``` go +func OpenFile(path string) error +``` +OpenFile opens new browser window for the file path. + + +## func OpenReader +``` go +func OpenReader(r io.Reader) error +``` +OpenReader consumes the contents of r and presents the +results in a new browser window. + + +## func OpenURL +``` go +func OpenURL(url string) error +``` +OpenURL opens a new browser window pointing to url. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go new file mode 100644 index 000000000000..3e5969064258 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser.go @@ -0,0 +1,63 @@ +// Package browser provides helpers to open files, readers, and urls in a browser window. +// +// The choice of which browser is started is entirely client dependant. +package browser + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" +) + +// Stdout is the io.Writer to which executed commands write standard output. +var Stdout io.Writer = os.Stdout + +// Stderr is the io.Writer to which executed commands write standard error. +var Stderr io.Writer = os.Stderr + +// OpenFile opens new browser window for the file path. +func OpenFile(path string) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + return OpenURL("file://" + path) +} + +// OpenReader consumes the contents of r and presents the +// results in a new browser window. +func OpenReader(r io.Reader) error { + f, err := ioutil.TempFile("", "browser") + if err != nil { + return fmt.Errorf("browser: could not create temporary file: %v", err) + } + if _, err := io.Copy(f, r); err != nil { + f.Close() + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + if err := f.Close(); err != nil { + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + oldname := f.Name() + newname := oldname + ".html" + if err := os.Rename(oldname, newname); err != nil { + return fmt.Errorf("browser: renaming temporary file failed: %v", err) + } + return OpenFile(newname) +} + +// OpenURL opens a new browser window pointing to url. +func OpenURL(url string) error { + return openBrowser(url) +} + +func runCmd(prog string, args ...string) error { + cmd := exec.Command(prog, args...) 
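+	// Forward the command's output to the package-level Stdout/Stderr
+	// writers so callers can redirect or capture it.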
+ cmd.Stdout = Stdout + cmd.Stderr = Stderr + setFlags(cmd) + return cmd.Run() +} diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go new file mode 100644 index 000000000000..6dff0403c711 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_darwin.go @@ -0,0 +1,9 @@ +package browser + +import "os/exec" + +func openBrowser(url string) error { + return runCmd("open", url) +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go new file mode 100644 index 000000000000..656c693baa79 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_linux.go @@ -0,0 +1,9 @@ +package browser + +import "os/exec" + +func openBrowser(url string) error { + return runCmd("xdg-open", url) +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go new file mode 100644 index 000000000000..8cc0a7f539b4 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_openbsd.go @@ -0,0 +1,16 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go new file mode 100644 index 000000000000..0e1e530c544e --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!darwin,!openbsd + +package browser + +import ( + "fmt" + "os/exec" + "runtime" +) + +func openBrowser(url string) error { + return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS) +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go new file mode 100644 index 000000000000..a964c7b914f1 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_windows.go @@ -0,0 +1,16 @@ +package browser + +import ( + "os/exec" + "strings" + "syscall" +) + +func openBrowser(url string) error { + r := strings.NewReplacer("&", "^&") + return runCmd("cmd", "/c", "start", r.Replace(url)) +} + +func setFlags(cmd *exec.Cmd) { + cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} +} diff --git a/vendor/github.com/snowflakedb/glog/LICENSE b/vendor/github.com/snowflakedb/glog/LICENSE new file mode 100644 index 000000000000..37ec93a14fdc --- /dev/null +++ b/vendor/github.com/snowflakedb/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/snowflakedb/glog/README b/vendor/github.com/snowflakedb/glog/README new file mode 100644 index 000000000000..387b4eb68909 --- /dev/null +++ b/vendor/github.com/snowflakedb/glog/README @@ -0,0 +1,44 @@ +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. + + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/snowflakedb/glog/glog.go b/vendor/github.com/snowflakedb/glog/glog.go new file mode 100644 index 000000000000..2efbceb54c3b --- /dev/null +++ b/vendor/github.com/snowflakedb/glog/glog.go @@ -0,0 +1,1180 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. 
+// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. 
+func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. 
+			continue
+		}
+		patLev := strings.Split(pat, "=")
+		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+			return errVmoduleSyntax
+		}
+		pattern := patLev[0]
+		v, err := strconv.Atoi(patLev[1])
+		if err != nil {
+			return errVmoduleSyntax
+		}
+		if v < 0 {
+			return errors.New("negative value for vmodule level")
+		}
+		if v == 0 {
+			continue // Ignore. It's harmless but no point in paying the overhead.
+		}
+		// TODO: check syntax of filter?
+		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(logging.verbosity, filter, true)
+	return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+	return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+	file string
+	line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+	return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+	if t.line != line {
+		return false
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		file = file[i+1:]
+	}
+	return t.file == file
+}
+
+func (t *traceLocation) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+	return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		t.line = 0
+		t.file = ""
+		return nil
+	}
+	fields := strings.Split(value, ":")
+	if len(fields) != 2 {
+		return errTraceSyntax
+	}
+	file, line := fields[0], fields[1]
+	if !strings.Contains(file, ".") {
+		return errTraceSyntax
+	}
+	v, err := strconv.Atoi(line)
+	if err != nil {
+		return errTraceSyntax
+	}
+	if v <= 0 {
+		return errors.New("negative or zero value for level")
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	t.line = v
+	t.file = file
+	return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+	Flush() error
+	Sync() error
+	io.Writer
+}
+
+func init() {
+	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+	flag.Var(&logging.verbosity, "v", "log level for V logs")
+	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+	// Default stderrThreshold is ERROR.
+	logging.stderrThreshold = errorLog
+
+	logging.setVState(0, nil, false)
+	go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+	logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+	// Boolean flags. Not handled atomically because the flag.Value interface
+	// does not let us avoid the =true, and that shorthand is necessary for
+	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
+	toStderr     bool // The -logtostderr flag.
+	alsoToStderr bool // The -alsologtostderr flag.
+
+	// Level flag. Handled atomically.
+	stderrThreshold severity // The -stderrthreshold flag.
+
+	// freeList is a list of byte buffers, maintained under freeListMu.
+	freeList *buffer
+	// freeListMu maintains the free list. It is separate from the main mutex
+	// so buffers can be grabbed and printed to without holding the main lock,
+	// for better parallelization.
+	freeListMu sync.Mutex
+
+	// mu protects the remaining elements of this structure and is
+	// used to synchronize logging.
+	mu sync.Mutex
+	// file holds the writer for each of the log types.
+	file [numSeverity]flushSyncWriter
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+	// filterLength stores the length of the vmodule filter chain. If greater
+	// than zero, it means vmodule is enabled. It may be read safely
+	// using atomic.LoadInt32, but is only modified under mu.
+	filterLength int32
+	// traceLocation is the state of the -log_backtrace_at flag.
+	traceLocation traceLocation
+	// These flags are modified only under lock, although verbosity may be fetched
+	// safely using atomic.LoadInt32.
+	vmodule   moduleSpec // The state of the -vmodule flag.
+	verbosity Level      // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+	bytes.Buffer
+	tmp  [64]byte // temporary byte array for creating headers.
+	next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	logging.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&logging.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		logging.vmodule.filter = filter
+		logging.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow().UTC() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. 
+func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. 
+ for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. 
+ var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". 
+	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+		text = fmt.Sprintf("bad log format: %s", b)
+	} else {
+		file = string(parts[0])
+		text = string(parts[2][1:]) // skip leading space
+		line, err = strconv.Atoi(string(parts[1]))
+		if err != nil {
+			text = fmt.Sprintf("bad line number: %s", b)
+			line = 1
+		}
+	}
+	// printWithFileLine with alsoToStderr=true, so standard log messages
+	// always appear on standard error.
+	logging.printWithFileLine(severity(lb), file, line, true, text)
+	return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+	fn := runtime.FuncForPC(pc)
+	file, _ := fn.FileLine(pc)
+	// The file is something like /a/b/c/d.go. We want just the d.
+	if strings.HasSuffix(file, ".go") {
+		file = file[:len(file)-3]
+	}
+	if slash := strings.LastIndex(file, "/"); slash >= 0 {
+		file = file[slash+1:]
+	}
+	for _, filter := range l.vmodule.filter {
+		if filter.match(file) {
+			l.vmap[pc] = filter.level
+			return filter.level
+		}
+	}
+	l.vmap[pc] = 0
+	return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//	if glog.V(2) { glog.Info("log this") }
+// or
+//	glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+	// This function tries hard to be cheap unless there's work to do.
+	// The fast path is two atomic loads and compares.
+
+	// Here is a cheap but safe test to see if V logging is enabled globally.
+	if logging.verbosity.get() >= level {
+		return Verbose(true)
+	}
+
+	// It's off globally but vmodule may still be set.
+	// Here is another cheap but safe test to see if vmodule is enabled.
+	if atomic.LoadInt32(&logging.filterLength) > 0 {
+		// Now we need a proper lock to use the logging structure. The pcs field
+		// is shared so we must lock before accessing it. This is fairly expensive,
+		// but if V logging is enabled we're slow anyway.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		if runtime.Callers(2, logging.pcs[:]) == 0 {
+			return Verbose(false)
+		}
+		v, ok := logging.vmap[logging.pcs[0]]
+		if !ok {
+			v = logging.setV(logging.pcs[0])
+		}
+		return Verbose(v >= level)
+	}
+	return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+	if v {
+		logging.print(infoLog, args...)
+	}
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). 
+func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/github.com/snowflakedb/glog/glog_file.go b/vendor/github.com/snowflakedb/glog/glog_file.go new file mode 100644 index 000000000000..65075d281110 --- /dev/null +++ b/vendor/github.com/snowflakedb/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. 
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/.gitignore b/vendor/github.com/snowflakedb/gosnowflake/.gitignore new file mode 100644 index 000000000000..d0fae246fd80 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/.gitignore @@ -0,0 +1,13 @@ +.idea/ +parameters*.json +parameters*.bat +*.p8 +coverage.txt +fuzz-*/ +/select1 +/selectmany +/verifycert +wss-golang-agent.config +wss-unified-agent.jar +whitesource/ +*.swp diff --git a/vendor/github.com/snowflakedb/gosnowflake/.semgrep.yml b/vendor/github.com/snowflakedb/gosnowflake/.semgrep.yml new file mode 100644 index 000000000000..78fbb53678ed --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/.semgrep.yml @@ -0,0 +1,158 @@ +rules: +- id: tls-with-insecure-cipher + message: | + CipherSuite configuration is insecure based on the tls package. See https://golang.org/pkg/crypto/tls/#InsecureCipherSuites + for why and what other cipher suites to use. 
+ For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#tls-with-insecure-cipher + metadata: + cwe: 'CWE-327: Use of a Broken or Risky Cryptographic Algorithm' + owasp: 'A9: Using Components with Known Vulnerabilities' + source-rule-url: https://github.com/securego/gosec/blob/master/rules/tls.go + references: + - https://golang.org/pkg/crypto/tls/#InsecureCipherSuites + languages: [go] + severity: WARNING + pattern-either: + - pattern: | + tls.Config{..., CipherSuites: []$TYPE{..., tls.TLS_RSA_WITH_RC4_128_SHA,...}} + - pattern: | + tls.Config{..., CipherSuites: []$TYPE{..., tls.TLS_RSA_WITH_AES_128_CBC_SHA256,...}} + - pattern: | + tls.Config{..., CipherSuites: []$TYPE{..., tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,...}} + - pattern: | + tls.Config{..., CipherSuites: []$TYPE{..., tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,...}} + - pattern: | + tls.Config{..., CipherSuites: []$TYPE{..., tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,...}} + - pattern: | + tls.Config{..., CipherSuites: []$TYPE{..., tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,...}} + - pattern: | + tls.CipherSuite{..., TLS_RSA_WITH_RC4_128_SHA ,...} + - pattern: | + tls.CipherSuite{..., TLS_RSA_WITH_AES_128_CBC_SHA256 ,...} + - pattern: | + tls.CipherSuite{..., TLS_ECDHE_ECDSA_WITH_RC4_128_SHA ,...} + - pattern: | + tls.CipherSuite{..., TLS_ECDHE_RSA_WITH_RC4_128_SHA ,...} + - pattern: | + tls.CipherSuite{..., TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 ,...} + - pattern: |- + tls.CipherSuite{..., TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 ,...} +- id: avoid-ssh-insecure-ignore-host-key + message: | + Disabling host key verification is discouraged. See https://skarlso.github.io/2019/02/17/go-ssh-with-host-key-verification/ + to learn more about the problem and how to fix it. + For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#avoid-ssh-insecure-ignore-host-key + metadata: + cwe: 'CWE-322: Key Exchange without Entity Authentication' + owasp: 'A3: Sensitive Data Exposure' + source-rule-url: https://github.com/securego/gosec + references: + - https://skarlso.github.io/2019/02/17/go-ssh-with-host-key-verification/ + languages: [go] + severity: WARNING + pattern: |- + ssh.InsecureIgnoreHostKey() +- id: ssl-v3-is-insecure + message: | + SSLv3 is known to be broken. Use TLS versions 1.2 or higher. 
+ For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#ssl-v3-is-insecure + metadata: + cwe: 'CWE-327: Use of a Broken or Risky Cryptographic Algorithm' + owasp: 'A9: Using Components with Known Vulnerabilities' + source-rule-url: https://github.com/securego/gosec/blob/master/rules/tls_config.go + references: + - https://golang.org/doc/go1.14#crypto/tls + - https://www.us-cert.gov/ncas/alerts/TA14-290A + languages: [go] + severity: WARNING + patterns: + - pattern: |- + tls.Config{..., MinVersion: $TLS.VersionSSL30, ...} +- id: use-of-md5 + message: | + Use of weak cryptographic primitive MD5 + Use SHA256 for signature verification and PBKDF2 for password hashing + For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#use-of-md5.1 + languages: [go] + severity: WARNING + metadata: + owasp: 'A9: Using Components with Known Vulnerabilities' + cwe: 'CWE-327: Use of a Broken or Risky Cryptographic Algorithm' + source-rule-url: https://github.com/securego/gosec#available-rules + pattern-either: + - pattern: | + md5.New() + - pattern: | + md5.Sum(...) +- id: use-of-sha1 + message: | + Use of weak cryptographic primitive SHA1 + Use SHA256 for signature verification and PBKDF2 for password hashing. + For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#use-of-sha1.1 + languages: [go] + severity: WARNING + metadata: + owasp: 'A9: Using Components with Known Vulnerabilities' + cwe: 'CWE-327: Use of a Broken or Risky Cryptographic Algorithm' + source-rule-url: https://github.com/securego/gosec#available-rules + pattern-either: + - pattern: | + sha1.New() + - pattern: | + sha1.Sum(...) +- id: use-of-DES + message: | + Use of weak cryptographic primitive DES + Use AES for encryption instead + For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#use-of-DES + languages: [go] + severity: WARNING + metadata: + owasp: 'A9: Using Components with Known Vulnerabilities' + cwe: 'CWE-327: Use of a Broken or Risky Cryptographic Algorithm' + source-rule-url: https://github.com/securego/gosec#available-rules + pattern-either: + - pattern: | + des.NewTripleDESCipher(...) + - pattern: | + des.NewCipher(...) +- id: use-of-rc4 + message: | + Use of weak cryptographic primitive rc4 + Use AES for encryption instead + For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#use-of-rc4 + languages: [go] + severity: WARNING + metadata: + owasp: 'A9: Using Components with Known Vulnerabilities' + cwe: 'CWE-327: Use of a Broken or Risky Cryptographic Algorithm' + source-rule-url: https://github.com/securego/gosec#available-rules + pattern: |- + rc4.NewCipher(...) +- id: useless-if-conditional + patterns: + - pattern-either: + - pattern: |- + if ($X) { + ... + } else if ($X) { + ... 
+        }
+  message: |
+    The if block checks the same condition on both branches (`$X`)
+    For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#useless-if-conditional
+  languages: [go]
+  severity: WARNING
+- id: useless-if-body
+  patterns:
+  - pattern: |-
+      if ($X) {
+          $S
+      } else {
+          $S
+      }
+  message: |
+    Useless if statement; both blocks have the same body
+    For more information, please refer to https://snowflakecomputing.atlassian.net/wiki/spaces/CLO/pages/1127713128/Semgrep+Finding+Remediation#useless-if-body
+  languages: [go]
+  severity: WARNING
diff --git a/vendor/github.com/snowflakedb/gosnowflake/CHANGELOG.md b/vendor/github.com/snowflakedb/gosnowflake/CHANGELOG.md
new file mode 100644
index 000000000000..8751493a4b2c
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/CHANGELOG.md
@@ -0,0 +1,100 @@
+## Version 1.1.18
+- Ignore session gone error when closing session.
+- Set the latest Go version to 1.12
+
+## Version 1.1.17
+- TIMESTAMP_LTZ is not converted properly
+- Add SERVICE_NAME support to Golang
+
+## Version 1.1.16
+- Fix custom JSON parser (@mhseiden)
+- Modify Support link on Gosnowflake GitHub
+
+## Version 1.1.15
+- Perform check on error for pingpong response (@ChTimTsubasa)
+- Add new 1.11.x for testing (@ChTimTsubasa)
+- Handle extra snowflake parameter 'type' (@ChTimTsubasa)
+
+## Version 1.1.14
+- Disable tests for golang 1.8 (@ChTimTsubasa)
+- Follow lint's new include path (@ChTimTsubasa)
+- Do not sleep through a context timeout (@mhseiden)
+
+## Version 1.1.13
+- User-configurable retry timeout for downloading (@mhseiden)
+- Implement retry-uuid in the URL (@ChTimTsubasa)
+- Remove unnecessary goroutine and fix context cancel/timeout handling (@ChTimTsubasa)
+
+## Version 1.1.12
+- Allow users to customize their glog through different vendoring. (@ChTimTsubasa)
+- Doc improvement for region parameter description (@smtakeda)
+
+## Version 1.1.11
+
+- (Private Preview) Added key pair authentication. (@ChTimTsubasa)
+- Changed glog timestamp to UTC (@ChTimTsubasa)
+- (Experimental) Added `MaxChunkDownloadWorkers` and `CustomJSONDecoderEnabled` to tune the result set download performance. (@mhseiden)
+
+## Version 1.1.10
+
+- Fixed heartbeat timings. It used to start a heartbeat per query. Now it starts per connection and closes in the `Close` method. #181
+- Removed busy wait from 1) the main thread that waits for the download chunk workers to finish downloads, and 2) the heartbeat goroutine/thread that triggers the heartbeat to the server.
+
+## Version 1.1.9
+
+- Fixed proxy for OCSP (@brendoncarroll)
+- Disabled megacheck for Go 1.8
+- Changed the UUID dependency (@kenshaw)
+
+## Version 1.1.8
+
+- Removed username restriction for oAuth
+
+## Version 1.1.7
+
+- Added `client_session_keep_alive` option to have a heartbeat in the background every hour to keep the connection alive. Fixed #160
+- Corrected doc about OCSP.
+- Added OS session info to the session.
+
+## Version 1.1.6
+
+- Fixed memory leak in the large result set. The chunk of memory is freed as soon as the cursor moves forward.
+- Removed glide dependency in favor of dep #149 (@tjj5036)
+- Fixed username and password URL escape issue #151
+- Added Go 1.10 test.
+
+## Version 1.1.5
+
+- Added externalbrowser authenticator support PR #141, #142 (@tjj5036)
+
+## Version 1.1.4
+
+- Raise HTTP 403 errors immediately after the authentication failure instead of retrying until the timeout.
Issue #138 (@dominicbarnes)
+- Fixed vararg error message.
+
+## Version 1.1.3
+
+- Removed hardcoded `public` schema name in case not specified.
+- Fixed `requestId` value
+
+## Version 1.1.2
+
+- `nil` should be set to the target value instead of the pointer to the target
+
+## Version 1.1.1
+
+- Fixed HTTP 403 errors when getting result sets from AWS S3. The change in the server release 2.23.0 will enforce a signature of key for result set.
+
+## Version 1.1.0
+
+- Fixed #125. Dropped proxy parameters. HTTP_PROXY, HTTPS_PROXY and NO_PROXY should be used.
+- Improved logging based on security code review. No sensitive information is logged.
+- Added no connection pool example
+- Fixed #110. Raise an error if the specified db, schema, or warehouse doesn't exist (role was already supported).
+- Added Go 1.9 config in TravisCI
+- Added session parameter support in DSN.
+
+## Version 1.0.0
+
+- Added [dep](https://github.com/golang/dep) manifest (@CrimsonVoid)
+- Bumped up the version to 1.0.0
diff --git a/vendor/github.com/snowflakedb/gosnowflake/CONTRIBUTING.md b/vendor/github.com/snowflakedb/gosnowflake/CONTRIBUTING.md
new file mode 100644
index 000000000000..75e2eca6231b
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing Guidelines
+
+## Reporting Issues
+
+Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/snowflakedb/gosnowflake/issues?state=open) or was [recently closed](https://github.com/snowflakedb/gosnowflake/issues?direction=desc&page=1&sort=updated&state=closed).
+
+## Contributing Code
+
+By contributing to this project, you share your code under the Apache License 2.0, as specified in the LICENSE file.
+
+### Code Review
+
+Everyone is invited to review and comment on pull requests.
+If it looks fine to you, comment with "LGTM" (Looks good to me).
+
+If changes are required, notify the reviewers with "PTAL" (Please take another look) after committing the fixes.
+
+Before merging the Pull Request, at least one Snowflake team member must have commented with "LGTM".
diff --git a/vendor/github.com/snowflakedb/gosnowflake/LICENSE b/vendor/github.com/snowflakedb/gosnowflake/LICENSE
new file mode 100644
index 000000000000..01f96dcae111
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/snowflakedb/gosnowflake/Makefile b/vendor/github.com/snowflakedb/gosnowflake/Makefile
new file mode 100644
index 000000000000..b8e5c051af6e
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/Makefile
@@ -0,0 +1,53 @@
+NAME:=gosnowflake
+VERSION:=$(shell git describe --tags --abbrev=0)
+REVISION:=$(shell git rev-parse --short HEAD)
+COVFLAGS:=
+
+## Run fmt, lint and test
+all: fmt lint wss cov
+
+include gosnowflake.mak
+
+## Run tests
+test_setup: test_teardown
+	python3 ci/scripts/hang_webserver.py 12345 &
+
+test_teardown:
+	kill -9 $$(ps -ewf | grep hang_webserver | grep -v grep | awk '{print $$2}') || true
+
+test: deps test_setup
+	./ci/scripts/test_component.sh
+
+## Run Coverage tests
+cov:
+	make test COVFLAGS="-coverprofile=coverage.txt -covermode=atomic"
+
+wss: deps
+	./ci/scripts/wss.sh
+
+## Lint
+lint: clint
+
+## Format source codes
+fmt: cfmt
+	@for c in $$(ls cmd); do \
+		(cd cmd/$$c; make fmt); \
+	done
+
+## Install sample programs
+install:
+	for c in $$(ls cmd); do \
+		(cd cmd/$$c; GOBIN=$$GOPATH/bin go install $$c.go); \
+	done
+
+## Build fuzz tests
+fuzz-build:
+	for c in $$(ls | grep -E "fuzz-*"); do \
+		(cd $$c; make fuzz-build); \
+	done
+
+## Run fuzz-dsn
+fuzz-dsn:
+	(cd fuzz-dsn; go-fuzz -bin=./dsn-fuzz.zip -workdir=.)
+
+.PHONY: setup deps update test lint help fuzz-dsn
diff --git a/vendor/github.com/snowflakedb/gosnowflake/README.rst b/vendor/github.com/snowflakedb/gosnowflake/README.rst
new file mode 100644
index 000000000000..e1d4ad2e489d
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/README.rst
@@ -0,0 +1,102 @@
+********************************************************************************
+Go Snowflake Driver
+********************************************************************************
+
+.. image:: https://github.com/snowflakedb/gosnowflake/workflows/Build%20and%20Test/badge.svg?branch=master
+    :target: https://github.com/snowflakedb/gosnowflake/actions?query=workflow%3A%22Build+and+Test%22
+
+.. image:: http://img.shields.io/:license-Apache%202-brightgreen.svg
+    :target: http://www.apache.org/licenses/LICENSE-2.0.txt
+
+.. image:: https://goreportcard.com/badge/github.com/snowflakedb/gosnowflake
+    :target: https://goreportcard.com/report/github.com/snowflakedb/gosnowflake
+
+This topic provides instructions for installing, running, and modifying the Go Snowflake Driver. The driver supports Go's `database/sql <https://golang.org/pkg/database/sql/>`_ package.
+
+Prerequisites
+================================================================================
+
+The following software packages are required to use the Go Snowflake Driver.
+
+Go
+----------------------------------------------------------------------
+
+The latest driver requires the `Go language <https://golang.org/>`_ 1.14 or higher. The supported operating systems are Linux, macOS, and Windows, but you may run the driver on other platforms if the Go language works correctly on those platforms.
+
+
+Installation
+================================================================================
+
+If not already installed, get the gosnowflake source code:
+
+.. code-block:: bash
+
+    go get -u github.com/snowflakedb/gosnowflake
+
+Docs
+====
+
+For detailed documentation and basic usage examples, please see the documentation at
+`godoc.org <https://godoc.org/github.com/snowflakedb/gosnowflake>`_.
+
+Sample Programs
+================================================================================
+
+Snowflake provides a set of sample programs to test with.
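Before diving into the bundled samples, the shape of a minimal program looks roughly like the following (an editor's sketch, not one of the shipped samples; the account, user, and password in the DSN are placeholders):

.. code-block:: go

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/snowflakedb/gosnowflake" // registers the "snowflake" driver
    )

    func main() {
        // DSN layout: user[:password]@account/database/schema — placeholder
        // values here, not working credentials.
        db, err := sql.Open("snowflake", "user:password@my_account/my_db/public")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        var v int
        if err := db.QueryRow("SELECT 1").Scan(&v); err != nil {
            log.Fatal(err)
        }
        log.Printf("SELECT 1 returned %d", v)
    }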
Set the environment variable ``$GOPATH`` to the top directory of your workspace, e.g., ``~/go``, and make certain to
+include ``$GOPATH/bin`` in the environment variable ``$PATH``. Run the ``make`` command to build all sample programs.
+
+.. code-block:: bash
+
+    make install
+
+In the following example, the program ``select1.go`` is built and installed in ``$GOPATH/bin`` and can be run from the command line:
+
+.. code-block:: bash
+
+    SNOWFLAKE_TEST_ACCOUNT=<your_account> \
+    SNOWFLAKE_TEST_USER=<your_user> \
+    SNOWFLAKE_TEST_PASSWORD=<your_password> \
+    select1
+    Congrats! You have successfully run SELECT 1 with Snowflake DB!
+
+Development
+================================================================================
+
+The developer notes are hosted with the source code on `GitHub <https://github.com/snowflakedb/gosnowflake>`_.
+
+Testing Code
+----------------------------------------------------------------------
+
+Set the Snowflake connection info in ``parameters.json``:
+
+.. code-block:: json
+
+    {
+        "testconnection": {
+            "SNOWFLAKE_TEST_USER":      "<your_user>",
+            "SNOWFLAKE_TEST_PASSWORD":  "<your_password>",
+            "SNOWFLAKE_TEST_ACCOUNT":   "<your_account>",
+            "SNOWFLAKE_TEST_WAREHOUSE": "<your_warehouse>",
+            "SNOWFLAKE_TEST_DATABASE":  "<your_database>",
+            "SNOWFLAKE_TEST_SCHEMA":    "<your_schema>",
+            "SNOWFLAKE_TEST_ROLE":      "<your_role>"
+        }
+    }
+
+Install `jq <https://stedolan.github.io/jq/>`_ so that the parameters can get parsed correctly, and run ``make test`` in your Go development environment:
+
+.. code-block:: bash
+
+    make test
+
+Submitting Pull Requests
+----------------------------------------------------------------------
+
+You may use your preferred editor to edit the driver code. Make certain to run ``make fmt lint`` before submitting any pull request to Snowflake. This command formats your source code according to the standard Go style and detects any coding style issues.
+
+Support
+----------------------------------------------------------------------
+
+For official support, contact Snowflake support at:
+https://support.snowflake.net/
+
diff --git a/vendor/github.com/snowflakedb/gosnowflake/auth.go b/vendor/github.com/snowflakedb/gosnowflake/auth.go
new file mode 100644
index 000000000000..1ebfda65e435
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/auth.go
@@ -0,0 +1,408 @@
+// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved.
+ +package gosnowflake + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "runtime" + "strconv" + "strings" + "time" + + "github.com/dgrijalva/jwt-go/v4" + "github.com/google/uuid" + + "crypto/sha256" + "crypto/x509" + "encoding/base64" +) + +const ( + clientType = "Go" +) + +// AuthType indicates the type of authentication in Snowflake +type AuthType int + +const ( + // AuthTypeSnowflake is the general username password authentication + AuthTypeSnowflake AuthType = iota + // AuthTypeOAuth is the OAuth authentication + AuthTypeOAuth + // AuthTypeExternalBrowser is to use a browser to access an Fed and perform SSO authentication + AuthTypeExternalBrowser + // AuthTypeOkta is to use a native okta URL to perform SSO authentication on Okta + AuthTypeOkta + // AuthTypeJwt is to use Jwt to perform authentication + AuthTypeJwt +) + +func determineAuthenticatorType(cfg *Config, value string) error { + upperCaseValue := strings.ToUpper(value) + lowerCaseValue := strings.ToLower(value) + if strings.Trim(value, " ") == "" || upperCaseValue == AuthTypeSnowflake.String() { + cfg.Authenticator = AuthTypeSnowflake + return nil + } else if upperCaseValue == AuthTypeOAuth.String() { + cfg.Authenticator = AuthTypeOAuth + return nil + } else if upperCaseValue == AuthTypeJwt.String() { + cfg.Authenticator = AuthTypeJwt + return nil + } else if upperCaseValue == AuthTypeExternalBrowser.String() { + cfg.Authenticator = AuthTypeExternalBrowser + return nil + } else { + // possibly Okta case + oktaURLString, err := url.QueryUnescape(lowerCaseValue) + if err != nil { + return &SnowflakeError{ + Number: ErrCodeFailedToParseAuthenticator, + Message: errMsgFailedToParseAuthenticator, + MessageArgs: []interface{}{lowerCaseValue}, + } + } + + oktaURL, err := url.Parse(oktaURLString) + if err != nil { + return &SnowflakeError{ + Number: ErrCodeFailedToParseAuthenticator, + Message: errMsgFailedToParseAuthenticator, + MessageArgs: []interface{}{oktaURLString}, + } + } + + if oktaURL.Scheme != "https" || !strings.HasSuffix(oktaURL.Host, "okta.com") { + return &SnowflakeError{ + Number: ErrCodeFailedToParseAuthenticator, + Message: errMsgFailedToParseAuthenticator, + MessageArgs: []interface{}{oktaURLString}, + } + } + cfg.OktaURL = oktaURL + cfg.Authenticator = AuthTypeOkta + } + return nil +} + +func (authType AuthType) String() string { + switch authType { + case AuthTypeSnowflake: + return "SNOWFLAKE" + case AuthTypeOAuth: + return "OAUTH" + case AuthTypeExternalBrowser: + return "EXTERNALBROWSER" + case AuthTypeOkta: + return "OKTA" + case AuthTypeJwt: + return "SNOWFLAKE_JWT" + default: + return "UNKNOWN" + } +} + +// platform consists of compiler and architecture type in string +var platform = fmt.Sprintf("%v-%v", runtime.Compiler, runtime.GOARCH) + +// operatingSystem is the runtime operating system. 
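// Editor's illustration (not part of the upstream file): determineAuthenticatorType
// above maps DSN authenticator values onto AuthType as follows, assuming cfg is a
// *Config:
//
//	_ = determineAuthenticatorType(cfg, "")                       // AuthTypeSnowflake (default)
//	_ = determineAuthenticatorType(cfg, "SNOWFLAKE_JWT")          // AuthTypeJwt
//	_ = determineAuthenticatorType(cfg, "EXTERNALBROWSER")        // AuthTypeExternalBrowser
//	_ = determineAuthenticatorType(cfg, "https://dev-1.okta.com") // AuthTypeOkta; only https URLs on okta.com hosts are accepted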
+var operatingSystem = runtime.GOOS + +// userAgent shows up in User-Agent HTTP header +var userAgent = fmt.Sprintf("%v/%v/%v/%v/%v-%v", + clientType, + SnowflakeGoDriverVersion, + runtime.Compiler, + runtime.Version(), + operatingSystem, + runtime.GOARCH) + +type authRequestClientEnvironment struct { + Application string `json:"APPLICATION"` + Os string `json:"OS"` + OsVersion string `json:"OS_VERSION"` + OCSPMode string `json:"OCSP_MODE"` +} +type authRequestData struct { + ClientAppID string `json:"CLIENT_APP_ID"` + ClientAppVersion string `json:"CLIENT_APP_VERSION"` + SvnRevision string `json:"SVN_REVISION"` + AccountName string `json:"ACCOUNT_NAME"` + LoginName string `json:"LOGIN_NAME,omitempty"` + Password string `json:"PASSWORD,omitempty"` + RawSAMLResponse string `json:"RAW_SAML_RESPONSE,omitempty"` + ExtAuthnDuoMethod string `json:"EXT_AUTHN_DUO_METHOD,omitempty"` + Passcode string `json:"PASSCODE,omitempty"` + Authenticator string `json:"AUTHENTICATOR,omitempty"` + SessionParameters map[string]interface{} `json:"SESSION_PARAMETERS,omitempty"` + ClientEnvironment authRequestClientEnvironment `json:"CLIENT_ENVIRONMENT"` + BrowserModeRedirectPort string `json:"BROWSER_MODE_REDIRECT_PORT,omitempty"` + ProofKey string `json:"PROOF_KEY,omitempty"` + Token string `json:"TOKEN,omitempty"` +} +type authRequest struct { + Data authRequestData `json:"data"` +} + +type nameValueParameter struct { + Name string `json:"name"` + Value interface{} `json:"value"` +} + +type authResponseSessionInfo struct { + DatabaseName string `json:"databaseName"` + SchemaName string `json:"schemaName"` + WarehouseName string `json:"warehouseName"` + RoleName string `json:"roleName"` +} + +type authResponseMain struct { + Token string `json:"token,omitempty"` + Validity time.Duration `json:"validityInSeconds,omitempty"` + MasterToken string `json:"masterToken,omitempty"` + MasterValidity time.Duration `json:"masterValidityInSeconds"` + DisplayUserName string `json:"displayUserName"` + ServerVersion string `json:"serverVersion"` + FirstLogin bool `json:"firstLogin"` + RemMeToken string `json:"remMeToken"` + RemMeValidity time.Duration `json:"remMeValidityInSeconds"` + HealthCheckInterval time.Duration `json:"healthCheckInterval"` + NewClientForUpgrade string `json:"newClientForUpgrade"` + SessionID int `json:"sessionId"` + Parameters []nameValueParameter `json:"parameters"` + SessionInfo authResponseSessionInfo `json:"sessionInfo"` + TokenURL string `json:"tokenUrl,omitempty"` + SSOURL string `json:"ssoUrl,omitempty"` + ProofKey string `json:"proofKey,omitempty"` +} +type authResponse struct { + Data authResponseMain `json:"data"` + Message string `json:"message"` + Code string `json:"code"` + Success bool `json:"success"` +} + +func postAuth( + ctx context.Context, + sr *snowflakeRestful, + params *url.Values, + headers map[string]string, + body []byte, + timeout time.Duration) ( + data *authResponse, err error) { + params.Add(requestIDKey, uuid.New().String()) + params.Add(requestGUIDKey, uuid.New().String()) + + fullURL := sr.getFullURL(loginRequestPath, params) + glog.V(2).Infof("full URL: %v", fullURL) + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, body, timeout, true) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { + var respd authResponse + err = json.NewDecoder(resp.Body).Decode(&respd) + if err != nil { + glog.V(1).Infof("failed to decode JSON. 
err: %v", err) + glog.Flush() + return nil, err + } + return &respd, nil + } + switch resp.StatusCode { + case http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout: + // service availability or connectivity issue. Most likely server side issue. + return nil, &SnowflakeError{ + Number: ErrCodeServiceUnavailable, + SQLState: SQLStateConnectionWasNotEstablished, + Message: errMsgServiceUnavailable, + MessageArgs: []interface{}{resp.StatusCode, fullURL}, + } + case http.StatusUnauthorized, http.StatusForbidden: + // failed to connect to db. account name may be wrong + return nil, &SnowflakeError{ + Number: ErrCodeFailedToConnect, + SQLState: SQLStateConnectionRejected, + Message: errMsgFailedToConnect, + MessageArgs: []interface{}{resp.StatusCode, fullURL}, + } + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) + glog.Flush() + return nil, err + } + glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) + glog.V(1).Infof("Header: %v", resp.Header) + glog.Flush() + return nil, &SnowflakeError{ + Number: ErrFailedToAuth, + SQLState: SQLStateConnectionRejected, + Message: errMsgFailedToAuth, + MessageArgs: []interface{}{resp.StatusCode, fullURL}, + } +} + +// Generates a map of headers needed to authenticate +// with Snowflake. +func getHeaders() map[string]string { + headers := make(map[string]string) + headers["Content-Type"] = headerContentTypeApplicationJSON + headers["accept"] = headerAcceptTypeApplicationSnowflake + headers["User-Agent"] = userAgent + return headers +} + +// Used to authenticate the user with Snowflake. +func authenticate( + ctx context.Context, + sc *snowflakeConn, + samlResponse []byte, + proofKey []byte, +) (resp *authResponseMain, err error) { + + headers := getHeaders() + clientEnvironment := authRequestClientEnvironment{ + Application: sc.cfg.Application, + Os: operatingSystem, + OsVersion: platform, + OCSPMode: sc.cfg.ocspMode(), + } + + sessionParameters := make(map[string]interface{}) + for k, v := range sc.cfg.Params { + // upper casing to normalize keys + sessionParameters[strings.ToUpper(k)] = *v + } + + sessionParameters[sessionClientValidateDefaultParameters] = sc.cfg.ValidateDefaultParameters != ConfigBoolFalse + + requestMain := authRequestData{ + ClientAppID: clientType, + ClientAppVersion: SnowflakeGoDriverVersion, + AccountName: sc.cfg.Account, + SessionParameters: sessionParameters, + ClientEnvironment: clientEnvironment, + } + + switch sc.cfg.Authenticator { + case AuthTypeExternalBrowser: + requestMain.ProofKey = string(proofKey) + requestMain.Token = string(samlResponse) + requestMain.LoginName = sc.cfg.User + requestMain.Authenticator = AuthTypeExternalBrowser.String() + case AuthTypeOAuth: + requestMain.LoginName = sc.cfg.User + requestMain.Authenticator = AuthTypeOAuth.String() + requestMain.Token = sc.cfg.Token + case AuthTypeOkta: + requestMain.RawSAMLResponse = string(samlResponse) + case AuthTypeJwt: + requestMain.Authenticator = AuthTypeJwt.String() + + jwtTokenString, err := prepareJWTToken(sc.cfg) + if err != nil { + return nil, err + } + requestMain.Token = jwtTokenString + case AuthTypeSnowflake: + glog.V(2).Info("Username and password") + requestMain.LoginName = sc.cfg.User + requestMain.Password = sc.cfg.Password + switch { + case sc.cfg.PasscodeInPassword: + requestMain.ExtAuthnDuoMethod = "passcode" + case sc.cfg.Passcode != "": + requestMain.Passcode = sc.cfg.Passcode + requestMain.ExtAuthnDuoMethod = 
"passcode" + } + } + + authRequest := authRequest{ + Data: requestMain, + } + params := &url.Values{} + if sc.cfg.Database != "" { + params.Add("databaseName", sc.cfg.Database) + } + if sc.cfg.Schema != "" { + params.Add("schemaName", sc.cfg.Schema) + } + if sc.cfg.Warehouse != "" { + params.Add("warehouse", sc.cfg.Warehouse) + } + if sc.cfg.Role != "" { + params.Add("roleName", sc.cfg.Role) + } + + jsonBody, err := json.Marshal(authRequest) + if err != nil { + return + } + + glog.V(2).Infof("PARAMS for Auth: %v, %v, %v, %v, %v, %v", + params, sc.rest.Protocol, sc.rest.Host, sc.rest.Port, sc.rest.LoginTimeout, sc.cfg.Authenticator.String()) + + respd, err := sc.rest.FuncPostAuth(ctx, sc.rest, params, headers, jsonBody, sc.rest.LoginTimeout) + if err != nil { + return nil, err + } + if !respd.Success { + glog.V(1).Infoln("Authentication FAILED") + glog.Flush() + sc.rest.Token = "" + sc.rest.MasterToken = "" + sc.rest.SessionID = -1 + code, err := strconv.Atoi(respd.Code) + if err != nil { + code = -1 + return nil, err + } + return nil, &SnowflakeError{ + Number: code, + SQLState: SQLStateConnectionRejected, + Message: respd.Message, + } + } + glog.V(2).Info("Authentication SUCCESS") + sc.rest.Token = respd.Data.Token + sc.rest.MasterToken = respd.Data.MasterToken + sc.rest.SessionID = respd.Data.SessionID + return &respd.Data, nil +} + +// Generate a JWT token in string given the configuration +func prepareJWTToken(config *Config) (string, error) { + pubBytes, err := x509.MarshalPKIXPublicKey(config.PrivateKey.Public()) + if err != nil { + return "", err + } + hash := sha256.Sum256(pubBytes) + + accountName := strings.ToUpper(config.Account) + userName := strings.ToUpper(config.User) + + issueAtTime := time.Now().UTC() + token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "iss": fmt.Sprintf("%s.%s.%s", accountName, userName, "SHA256:"+base64.StdEncoding.EncodeToString(hash[:])), + "sub": fmt.Sprintf("%s.%s", accountName, userName), + "iat": issueAtTime.Unix(), + "nbf": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(), + "exp": issueAtTime.Add(config.JWTExpireTimeout).Unix(), + }) + + tokenString, err := token.SignedString(config.PrivateKey) + + if err != nil { + return "", err + } + + return tokenString, err +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go b/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go new file mode 100644 index 000000000000..3672d8053277 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go @@ -0,0 +1,254 @@ +package gosnowflake + +// +// Copyright (c) 2019 Snowflake Computing Inc. All right reserved. +// + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/pkg/browser" +) + +const ( + successHTML = ` +SAML Response for Snowflake + +Your identity was confirmed and propagated to Snowflake %v. +You can close this window now and go back where you started from. +` +) + +const ( + bufSize = 8192 +) + +// Builds a response to show to the user after successfully +// getting a response from Snowflake. 
+func buildResponse(application string) bytes.Buffer { + body := fmt.Sprintf(successHTML, application) + t := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Body: ioutil.NopCloser(bytes.NewBufferString(body)), + ContentLength: int64(len(body)), + Request: nil, + Header: make(http.Header), + } + var b bytes.Buffer + t.Write(&b) + return b +} + +// This opens a socket that listens on all available unicast +// and any anycast IP addresses locally. By specifying "0", we are +// able to bind to a free port. +func bindToPort() (net.Listener, error) { + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + glog.V(1).Infof("unable to bind to a port on localhost, err: %v", err) + return nil, err + } + return l, nil +} + +// Opens a browser window (or new tab) with the configured IDP Url. +// This can / will fail if running inside a shell with no display, ie +// ssh'ing into a box attempting to authenticate via external browser. +func openBrowser(idpURL string) error { + err := browser.OpenURL(idpURL) + if err != nil { + glog.V(1).Infof("failed to open a browser. err: %v", err) + return err + } + return nil +} + +// Gets the IDP Url and Proof Key from Snowflake. +// Note: FuncPostAuthSaml will return a fully qualified error if +// there is something wrong getting data from Snowflake. +func getIdpURLProofKey( + ctx context.Context, + sr *snowflakeRestful, + authenticator string, + application string, + account string, + callbackPort int) (string, string, error) { + + headers := make(map[string]string) + headers["Content-Type"] = headerContentTypeApplicationJSON + headers["accept"] = headerContentTypeApplicationJSON + headers["User-Agent"] = userAgent + + clientEnvironment := authRequestClientEnvironment{ + Application: application, + Os: operatingSystem, + OsVersion: platform, + } + + requestMain := authRequestData{ + ClientAppID: clientType, + ClientAppVersion: SnowflakeGoDriverVersion, + AccountName: account, + ClientEnvironment: clientEnvironment, + Authenticator: authenticator, + BrowserModeRedirectPort: strconv.Itoa(callbackPort), + } + + authRequest := authRequest{ + Data: requestMain, + } + + jsonBody, err := json.Marshal(authRequest) + if err != nil { + glog.V(1).Infof("failed to serialize json. err: %v", err) + return "", "", err + } + + respd, err := sr.FuncPostAuthSAML(ctx, sr, headers, jsonBody, sr.LoginTimeout) + if err != nil { + return "", "", err + } + return respd.Data.SSOURL, respd.Data.ProofKey, nil +} + +// The response returned from Snowflake looks like so: +// GET /?token=encodedSamlToken +// Host: localhost:54001 +// Connection: keep-alive +// Upgrade-Insecure-Requests: 1 +// User-Agent: userAgentStr +// Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8 +// Referer: https://myaccount.snowflakecomputing.com/fed/login +// Accept-Encoding: gzip, deflate, br +// Accept-Language: en-US,en;q=0.9 +// This extracts the token portion of the response. +func getTokenFromResponse(response string) (string, error) { + start := "GET /?token=" + arr := strings.Split(response, "\r\n") + if !strings.HasPrefix(arr[0], start) { + glog.V(1).Info("response is malformed. 
") + return "", &SnowflakeError{ + Number: ErrFailedToParseResponse, + SQLState: SQLStateConnectionRejected, + Message: errMsgFailedToParseResponse, + MessageArgs: []interface{}{response}, + } + } + token := strings.TrimLeft(arr[0], start) + token = strings.Split(token, " ")[0] + return token, nil +} + +// Authentication by an external browser takes place via the following: +// - the golang snowflake driver communicates to Snowflake that the user wishes to +// authenticate via external browser +// - snowflake sends back the IDP Url configured at the Snowflake side for the +// provided account +// - the default browser is opened to that URL +// - user authenticates at the IDP, and is redirected to Snowflake +// - Snowflake directs the user back to the driver +// - authenticate is complete! +func authenticateByExternalBrowser( + ctx context.Context, + sr *snowflakeRestful, + authenticator string, + application string, + account string, + user string, + password string, +) ([]byte, []byte, error) { + l, err := bindToPort() + if err != nil { + return nil, nil, err + } + defer l.Close() + + callbackPort := l.Addr().(*net.TCPAddr).Port + idpURL, proofKey, err := getIdpURLProofKey( + ctx, sr, authenticator, application, account, callbackPort) + if err != nil { + return nil, nil, err + } + + if err = openBrowser(idpURL); err != nil { + return nil, nil, err + } + + encodedSamlResponseChan := make(chan string) + errChan := make(chan error) + + var encodedSamlResponse string + var errFromGoroutine error + conn, err := l.Accept() + if err != nil { + glog.V(1).Infof("unable to accept connection. err: %v", err) + log.Fatal(err) + } + go func(c net.Conn) { + var buf bytes.Buffer + total := 0 + encodedSamlResponse := "" + var errAccept error + for { + b := make([]byte, bufSize) + n, err := c.Read(b) + if err != nil { + if err != io.EOF { + glog.V(1).Infof("error reading from socket. err: %v", err) + errAccept = &SnowflakeError{ + Number: ErrFailedToGetExternalBrowserResponse, + SQLState: SQLStateConnectionRejected, + Message: errMsgFailedToGetExternalBrowserResponse, + MessageArgs: []interface{}{err}, + } + } + break + } + total += n + buf.Write(b) + if n < bufSize { + // We successfully read all data + s := string(buf.Bytes()[:total]) + encodedSamlResponse, errAccept = getTokenFromResponse(s) + break + } + buf.Grow(bufSize) + } + if encodedSamlResponse != "" { + httpResponse := buildResponse(application) + c.Write(httpResponse.Bytes()) + } + c.Close() + encodedSamlResponseChan <- encodedSamlResponse + errChan <- errAccept + }(conn) + + encodedSamlResponse = <-encodedSamlResponseChan + errFromGoroutine = <-errChan + + if errFromGoroutine != nil { + return nil, nil, errFromGoroutine + } + + escapedSamlResponse, err := url.QueryUnescape(encodedSamlResponse) + if err != nil { + glog.V(1).Infof("unable to unescape saml response. err: %v", err) + return nil, nil, err + } + return []byte(escapedSamlResponse), []byte(proofKey), nil +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/authokta.go b/vendor/github.com/snowflakedb/gosnowflake/authokta.go new file mode 100644 index 000000000000..c9375073c188 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/authokta.go @@ -0,0 +1,345 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. 
+
+package gosnowflake
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"html"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+
+	"github.com/google/uuid"
+)
+
+type authOKTARequest struct {
+	Username string `json:"username"`
+	Password string `json:"password"`
+}
+
+type authOKTAResponse struct {
+	CookieToken string `json:"cookieToken"`
+}
+
+/*
+authenticateBySAML authenticates a user by SAML
+SAML Authentication
+1. query GS to obtain IDP token and SSO url
+2. IMPORTANT Client side validation:
+   validate that both the token url and the sso url contain the same prefix
+   (protocol + host + port) as the given authenticator url.
+   Explanation:
+   This provides a way for the user to 'authenticate' the IDP it is
+   sending his/her credentials to. Without such a check, the user could
+   be coerced to provide credentials to an IDP impersonator.
+3. query IDP token url to authenticate and retrieve access token
+4. given access token, query IDP URL snowflake app to get SAML response
+5. IMPORTANT Client side validation:
+   validate that the post back url that comes back with the SAML response
+   contains the same prefix as the Snowflake server url, which is the
+   intended destination url to Snowflake.
+Explanation:
+   This emulates the behavior of IDP initiated login flow in the user
+   browser where the IDP instructs the browser to POST the SAML
+   assertion to the specific SP endpoint. This is critical in
+   preventing a SAML assertion issued to one SP from being sent to
+   another SP.
+*/
+func authenticateBySAML(
+	ctx context.Context,
+	sr *snowflakeRestful,
+	oktaURL *url.URL,
+	application string,
+	account string,
+	user string,
+	password string,
+) (samlResponse []byte, err error) {
+	glog.V(2).Info("step 1: query GS to obtain IDP token and SSO url")
+	headers := make(map[string]string)
+	headers["Content-Type"] = headerContentTypeApplicationJSON
+	headers["accept"] = headerContentTypeApplicationJSON
+	headers["User-Agent"] = userAgent
+
+	clientEnvironment := authRequestClientEnvironment{
+		Application: application,
+		Os:          operatingSystem,
+		OsVersion:   platform,
+	}
+	requestMain := authRequestData{
+		ClientAppID:       clientType,
+		ClientAppVersion:  SnowflakeGoDriverVersion,
+		AccountName:       account,
+		ClientEnvironment: clientEnvironment,
+		Authenticator:     oktaURL.String(),
+	}
+	authRequest := authRequest{
+		Data: requestMain,
+	}
+	params := &url.Values{}
+	jsonBody, err := json.Marshal(authRequest)
+	if err != nil {
+		return nil, err
+	}
+	glog.V(2).Infof("PARAMS for Auth: %v, %v", params, sr)
+	respd, err := sr.FuncPostAuthSAML(ctx, sr, headers, jsonBody, sr.LoginTimeout)
+	if err != nil {
+		return nil, err
+	}
+	if !respd.Success {
+		glog.V(1).Infoln("Authentication FAILED")
+		glog.Flush()
+		sr.Token = ""
+		sr.MasterToken = ""
+		sr.SessionID = -1
+		code, err := strconv.Atoi(respd.Code)
+		if err != nil {
+			code = -1
+			return nil, err
+		}
+		return nil, &SnowflakeError{
+			Number:   code,
+			SQLState: SQLStateConnectionRejected,
+			Message:  respd.Message,
+		}
+	}
+	glog.V(2).Info("step 2: validate Token and SSO URL has the same prefix as oktaURL")
+	var tokenURL *url.URL
+	var ssoURL *url.URL
+	if tokenURL, err = url.Parse(respd.Data.TokenURL); err != nil {
+		return nil, fmt.Errorf("failed to parse token URL. %v", respd.Data.TokenURL)
+	}
+	if ssoURL, err = url.Parse(respd.Data.SSOURL); err != nil {
+		return nil, fmt.Errorf("failed to parse ssoURL URL. 
%v", respd.Data.SSOURL) + } + if !isPrefixEqual(oktaURL, ssoURL) || !isPrefixEqual(oktaURL, tokenURL) { + return nil, &SnowflakeError{ + Number: ErrCodeIdpConnectionError, + SQLState: SQLStateConnectionRejected, + Message: errMsgIdpConnectionError, + MessageArgs: []interface{}{oktaURL, respd.Data.TokenURL, respd.Data.SSOURL}, + } + } + glog.V(2).Info("step 3: query IDP token url to authenticate and retrieve access token") + jsonBody, err = json.Marshal(authOKTARequest{ + Username: user, + Password: password, + }) + if err != nil { + return nil, err + } + respa, err := sr.FuncPostAuthOKTA(ctx, sr, headers, jsonBody, respd.Data.TokenURL, sr.LoginTimeout) + if err != nil { + return nil, err + } + + glog.V(2).Info("step 4: query IDP URL snowflake app to get SAML response") + params = &url.Values{} + params.Add("RelayState", "/some/deep/link") + params.Add("onetimetoken", respa.CookieToken) + + headers = make(map[string]string) + headers["accept"] = "*/*" + bd, err := sr.FuncGetSSO(ctx, sr, params, headers, respd.Data.SSOURL, sr.LoginTimeout) + if err != nil { + return nil, err + } + glog.V(2).Info("step 5: validate post_back_url matches Snowflake URL") + tgtURL, err := postBackURL(bd) + if err != nil { + return nil, err + } + + fullURL := sr.getURL() + glog.V(2).Infof("tgtURL: %v, origURL: %v", tgtURL, fullURL) + if !isPrefixEqual(tgtURL, fullURL) { + return nil, &SnowflakeError{ + Number: ErrCodeSSOURLNotMatch, + SQLState: SQLStateConnectionRejected, + Message: errMsgSSOURLNotMatch, + MessageArgs: []interface{}{tgtURL, fullURL}, + } + } + return bd, nil +} + +func postBackURL(htmlData []byte) (url *url.URL, err error) { + idx0 := bytes.Index(htmlData, []byte(" 0 { + req.Bindings = make(map[string]execBindParameter, len(bindings)) + for i, n := 0, len(bindings); i < n; i++ { + t := goTypeToSnowflake(bindings[i].Value, tsmode) + glog.V(2).Infof("tmode: %v\n", t) + if t == "CHANGE_TYPE" { + tsmode, err = dataTypeMode(bindings[i].Value) + if err != nil { + return nil, err + } + } else { + var v1 interface{} + if t == "ARRAY" { + t, v1 = arrayToString(bindings[i].Value) + } else { + v1, err = valueToString(bindings[i].Value, tsmode) + } + if err != nil { + return nil, err + } + req.Bindings[strconv.Itoa(idx)] = execBindParameter{ + Type: t, + Value: v1, + } + idx++ + } + } + } + multiCount := ctx.Value(MultiStatementCount) + if multiCount != nil { + req.Parameters = map[string]interface{}{string(MultiStatementCount): multiCount} + } + glog.V(2).Infof("bindings: %v", req.Bindings) + glog.V(2).Infof("parameters: %v", req.Parameters) + + headers := make(map[string]string) + headers["Content-Type"] = headerContentTypeApplicationJSON + headers["accept"] = headerAcceptTypeApplicationSnowflake // TODO v1.1: change to JSON in case of PUT/GET + headers["User-Agent"] = userAgent + if serviceName, ok := sc.cfg.Params[serviceName]; ok { + headers["X-Snowflake-Service"] = *serviceName + } + + jsonBody, err := json.Marshal(req) + if err != nil { + return nil, err + } + + var data *execResponse + + requestID := uuid.New() + data, err = sc.rest.FuncPostQuery(ctx, sc.rest, &url.Values{}, headers, jsonBody, sc.rest.RequestTimeout, &requestID) + if err != nil { + return data, err + } + var code int + if data.Code != "" { + code, err = strconv.Atoi(data.Code) + if err != nil { + code = -1 + return data, err + } + } else { + code = -1 + } + glog.V(2).Infof("Success: %v, Code: %v", data.Success, code) + if !data.Success { + return nil, &SnowflakeError{ + Number: code, + SQLState: data.Data.SQLState, + Message: 
data.Message, + QueryID: data.Data.QueryID, + } + } + glog.V(2).Info("Exec/Query SUCCESS") + sc.cfg.Database = data.Data.FinalDatabaseName + sc.cfg.Schema = data.Data.FinalSchemaName + sc.cfg.Role = data.Data.FinalRoleName + sc.cfg.Warehouse = data.Data.FinalWarehouseName + sc.QueryID = data.Data.QueryID + sc.SQLState = data.Data.SQLState + sc.populateSessionParameters(data.Data.Parameters) + return data, err +} + +func (sc *snowflakeConn) Begin() (driver.Tx, error) { + return sc.BeginTx(context.TODO(), driver.TxOptions{}) +} + +func (sc *snowflakeConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + glog.V(2).Info("BeginTx") + if opts.ReadOnly { + return nil, &SnowflakeError{ + Number: ErrNoReadOnlyTransaction, + SQLState: SQLStateFeatureNotSupported, + Message: errMsgNoReadOnlyTransaction, + } + } + if int(opts.Isolation) != int(sql.LevelDefault) { + return nil, &SnowflakeError{ + Number: ErrNoDefaultTransactionIsolationLevel, + SQLState: SQLStateFeatureNotSupported, + Message: errMsgNoDefaultTransactionIsolationLevel, + } + } + if sc.rest == nil { + return nil, driver.ErrBadConn + } + _, err := sc.exec(ctx, "BEGIN", false, false, nil) + if err != nil { + return nil, err + } + return &snowflakeTx{sc}, err +} + +func (sc *snowflakeConn) cleanup() { + glog.Flush() // must flush log buffer while the process is running. + sc.rest = nil + sc.cfg = nil +} + +func (sc *snowflakeConn) Close() (err error) { + glog.V(2).Infoln("Close") + sc.stopHeartBeat() + + err = sc.rest.FuncCloseSession(context.TODO(), sc.rest, sc.rest.RequestTimeout) + if err != nil { + glog.V(2).Info(err) + } + sc.cleanup() + return nil +} + +func (sc *snowflakeConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + glog.V(2).Infoln("Prepare") + if sc.rest == nil { + return nil, driver.ErrBadConn + } + stmt := &snowflakeStmt{ + sc: sc, + query: query, + } + return stmt, nil +} + +func (sc *snowflakeConn) Prepare(query string) (driver.Stmt, error) { + return sc.PrepareContext(context.TODO(), query) +} + +func (sc *snowflakeConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + glog.V(2).Infof("Exec: %#v, %v", query, args) + if sc.rest == nil { + return nil, driver.ErrBadConn + } + // TODO: handle noResult and isInternal + data, err := sc.exec(ctx, query, false, false, args) + if err != nil { + glog.V(2).Infof("error: %v", err) + if data != nil { + code, err := strconv.Atoi(data.Code) + if err != nil { + return nil, err + } + return nil, &SnowflakeError{ + Number: code, + SQLState: data.Data.SQLState, + Message: err.Error(), + QueryID: data.Data.QueryID} + } + return nil, err + } + + var updatedRows int64 + if sc.isDml(data.Data.StatementTypeID) { + // collects all values from the returned row sets + updatedRows, err = updateRows(data.Data) + if err != nil { + return nil, err + } + glog.V(2).Infof("number of updated rows: %#v", updatedRows) + return &snowflakeResult{ + affectedRows: updatedRows, + insertID: -1, + queryID: sc.QueryID, + }, nil // last insert id is not supported by Snowflake + } else if sc.isMultiStmt(data.Data) { + childResults := getChildResults(data.Data.ResultIDs, data.Data.ResultTypes) + for _, child := range childResults { + resultPath := fmt.Sprintf("/queries/%s/result", child.id) + childData, err := sc.getQueryResult(ctx, resultPath) + if err != nil { + glog.V(2).Infof("error: %v", err) + code, err := strconv.Atoi(childData.Code) + if err != nil { + return nil, err + } + if childData != nil { + return 
nil, &SnowflakeError{ + Number: code, + SQLState: childData.Data.SQLState, + Message: err.Error(), + QueryID: childData.Data.QueryID} + } + return nil, err + } + if sc.isDml(childData.Data.StatementTypeID) { + count, err := updateRows(childData.Data) + if err != nil { + glog.V(2).Infof("error: %v", err) + if childData != nil { + code, err := strconv.Atoi(childData.Code) + if err != nil { + return nil, err + } + return nil, &SnowflakeError{ + Number: code, + SQLState: childData.Data.SQLState, + Message: err.Error(), + QueryID: childData.Data.QueryID} + } + return nil, err + } + updatedRows += count + } + } + glog.V(2).Infof("number of updated rows: %#v", updatedRows) + return &snowflakeResult{ + affectedRows: updatedRows, + insertID: -1, + queryID: sc.QueryID, + }, nil + } + glog.V(2).Info("DDL") + return driver.ResultNoRows, nil +} + +func (sc *snowflakeConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + glog.V(2).Infof("Query: %#v, %v", query, args) + if sc.rest == nil { + return nil, driver.ErrBadConn + } + // TODO: handle noResult and isInternal + data, err := sc.exec(ctx, query, false, false, args) + if err != nil { + glog.V(2).Infof("error: %v", err) + if data != nil { + code, err := strconv.Atoi(data.Code) + if err != nil { + return nil, err + } + return nil, &SnowflakeError{ + Number: code, + SQLState: data.Data.SQLState, + Message: err.Error(), + QueryID: data.Data.QueryID} + } + return nil, err + } + + rows := new(snowflakeRows) + rows.sc = sc + rows.RowType = data.Data.RowType + rows.ChunkDownloader = &snowflakeChunkDownloader{ + sc: sc, + ctx: ctx, + CurrentChunk: make([]chunkRowType, len(data.Data.RowSet)), + ChunkMetas: data.Data.Chunks, + Total: data.Data.Total, + TotalRowIndex: int64(-1), + CellCount: len(data.Data.RowType), + Qrmk: data.Data.Qrmk, + QueryResultFormat: data.Data.QueryResultFormat, + ChunkHeader: data.Data.ChunkHeaders, + FuncDownload: downloadChunk, + FuncDownloadHelper: downloadChunkHelper, + FuncGet: getChunk, + RowSet: rowSetType{RowType: data.Data.RowType, + JSON: data.Data.RowSet, + RowSetBase64: data.Data.RowSetBase64, + }, + } + rows.queryID = sc.QueryID + + if sc.isMultiStmt(data.Data) { + childResults := getChildResults(data.Data.ResultIDs, data.Data.ResultTypes) + var nextChunkDownloader *snowflakeChunkDownloader + firstResultSet := false + + for _, child := range childResults { + resultPath := fmt.Sprintf("/queries/%s/result", child.id) + childData, err := sc.getQueryResult(ctx, resultPath) + if err != nil { + glog.V(2).Infof("error: %v", err) + if childData != nil { + code, err := strconv.Atoi(childData.Code) + if err != nil { + return nil, err + } + return nil, &SnowflakeError{ + Number: code, + SQLState: childData.Data.SQLState, + Message: err.Error(), + QueryID: childData.Data.QueryID} + } + return nil, err + } + if !firstResultSet { + // populate rows.ChunkDownloader with the first child + rows.ChunkDownloader = populateChunkDownloader(ctx, sc, childData.Data) + nextChunkDownloader = rows.ChunkDownloader + firstResultSet = true + } else { + nextChunkDownloader.NextDownloader = populateChunkDownloader(ctx, sc, childData.Data) + nextChunkDownloader = nextChunkDownloader.NextDownloader + } + } + } + + rows.ChunkDownloader.start() + return rows, err +} + +func (sc *snowflakeConn) Exec( + query string, + args []driver.Value) ( + driver.Result, error) { + return sc.ExecContext(context.TODO(), query, toNamedValues(args)) +} + +func (sc *snowflakeConn) Query( + query string, + args []driver.Value) 
( + driver.Rows, error) { + return sc.QueryContext(context.TODO(), query, toNamedValues(args)) +} + +func (sc *snowflakeConn) Ping(ctx context.Context) error { + glog.V(2).Infoln("Ping") + if sc.rest == nil { + return driver.ErrBadConn + } + // TODO: handle noResult and isInternal + _, err := sc.exec(ctx, "SELECT 1", false, false, []driver.NamedValue{}) + return err +} + +func (sc *snowflakeConn) CheckNamedValue(nv *driver.NamedValue) error { + switch reflect.TypeOf(nv.Value) { + case reflect.TypeOf([]int{0}), reflect.TypeOf([]int64{0}), reflect.TypeOf([]float64{0}), + reflect.TypeOf([]bool{false}), reflect.TypeOf([]string{""}): + return nil + default: + return driver.ErrSkip + } +} + +func (sc *snowflakeConn) populateSessionParameters(parameters []nameValueParameter) { + // other session parameters (not all) + glog.V(2).Infof("params: %#v", parameters) + for _, param := range parameters { + v := "" + switch param.Value.(type) { + case int64: + if vv, ok := param.Value.(int64); ok { + v = strconv.FormatInt(vv, 10) + } + case float64: + if vv, ok := param.Value.(float64); ok { + v = strconv.FormatFloat(vv, 'g', -1, 64) + } + case bool: + if vv, ok := param.Value.(bool); ok { + v = strconv.FormatBool(vv) + } + default: + if vv, ok := param.Value.(string); ok { + v = vv + } + } + glog.V(3).Infof("parameter. name: %v, value: %v", param.Name, v) + sc.cfg.Params[strings.ToLower(param.Name)] = &v + } +} + +func (sc *snowflakeConn) isClientSessionKeepAliveEnabled() bool { + v, ok := sc.cfg.Params[sessionClientSessionKeepAlive] + if !ok { + return false + } + return strings.Compare(*v, "true") == 0 +} + +func (sc *snowflakeConn) startHeartBeat() { + if !sc.isClientSessionKeepAliveEnabled() { + return + } + sc.rest.HeartBeat = &heartbeat{ + restful: sc.rest, + } + sc.rest.HeartBeat.start() +} + +func (sc *snowflakeConn) stopHeartBeat() { + if !sc.isClientSessionKeepAliveEnabled() { + return + } + sc.rest.HeartBeat.stop() +} + +func updateRows(data execResponseData) (int64, error) { + var count int64 + for i, n := 0, len(data.RowType); i < n; i++ { + v, err := strconv.ParseInt(*data.RowSet[0][i], 10, 64) + if err != nil { + return -1, err + } + count += v + } + return count, nil +} + +type childResult struct { + id string + typ string +} + +func getChildResults(IDs string, types string) []childResult { + if IDs == "" { + return nil + } + queryIDs := strings.Split(IDs, ",") + resultTypes := strings.Split(types, ",") + res := make([]childResult, len(queryIDs)) + for i, id := range queryIDs { + res[i] = childResult{id, resultTypes[i]} + } + return res +} + +func (sc *snowflakeConn) getQueryResult(ctx context.Context, resultPath string) (*execResponse, error) { + headers := make(map[string]string) + headers["Content-Type"] = headerContentTypeApplicationJSON + headers["accept"] = headerAcceptTypeApplicationSnowflake + headers["User-Agent"] = userAgent + if serviceName, ok := sc.cfg.Params[serviceName]; ok { + headers["X-Snowflake-Service"] = *serviceName + } + param := make(url.Values) + param.Add(requestIDKey, uuid.New().String()) + param.Add("clientStartTime", strconv.FormatInt(time.Now().Unix(), 10)) + param.Add(requestGUIDKey, uuid.New().String()) + if sc.rest.Token != "" { + headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sc.rest.Token) + } + url := sc.rest.getFullURL(resultPath, ¶m) + res, err := sc.rest.FuncGet(ctx, sc.rest, url, headers, sc.rest.RequestTimeout) + if err != nil { + glog.V(1).Infof("failed to get response. 
err: %v", err) + glog.Flush() + return nil, err + } + var respd *execResponse + err = json.NewDecoder(res.Body).Decode(&respd) + if err != nil { + glog.V(1).Infof("failed to decode JSON. err: %v", err) + glog.Flush() + return nil, err + } + return respd, nil +} + +func populateChunkDownloader(ctx context.Context, sc *snowflakeConn, data execResponseData) *snowflakeChunkDownloader { + return &snowflakeChunkDownloader{ + sc: sc, + ctx: ctx, + CurrentChunk: make([]chunkRowType, len(data.RowSet)), + ChunkMetas: data.Chunks, + Total: data.Total, + TotalRowIndex: int64(-1), + CellCount: len(data.RowType), + Qrmk: data.Qrmk, + QueryResultFormat: data.QueryResultFormat, + ChunkHeader: data.ChunkHeaders, + FuncDownload: downloadChunk, + FuncDownloadHelper: downloadChunkHelper, + FuncGet: getChunk, + RowSet: rowSetType{RowType: data.RowType, + JSON: data.RowSet, + RowSetBase64: data.RowSetBase64, + }, + } +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/converter.go b/vendor/github.com/snowflakedb/gosnowflake/converter.go new file mode 100644 index 000000000000..dff7d5f743de --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/converter.go @@ -0,0 +1,519 @@ +// Copyright (c) 2017-2020 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "database/sql/driver" + "encoding/hex" + "fmt" + "github.com/apache/arrow/go/arrow" + "github.com/apache/arrow/go/arrow/array" + "github.com/apache/arrow/go/arrow/decimal128" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" +) + +// goTypeToSnowflake translates Go data type to Snowflake data type. +func goTypeToSnowflake(v driver.Value, tsmode string) string { + switch v := v.(type) { + case int64: + return "FIXED" + case float64: + return "REAL" + case bool: + return "BOOLEAN" + case string: + return "TEXT" + case []byte: + if tsmode == "BINARY" { + return "BINARY" // may be redundant but ensures BINARY type + } + if v == nil || len(v) != 1 { + return "TEXT" // invalid byte array. won't take as BINARY + } + _, err := dataTypeMode(v) + if err != nil { + return "TEXT" // not supported dataType + } + return "CHANGE_TYPE" + case []int, []int64, []float64, []bool, []string: + return "ARRAY" + case time.Time: + return tsmode + } + return "TEXT" +} + +// snowflakeTypeToGo translates Snowflake data type to Go data type. +func snowflakeTypeToGo(dbtype string, scale int64) reflect.Type { + switch dbtype { + case "fixed": + if scale == 0 { + return reflect.TypeOf(int64(0)) + } + return reflect.TypeOf(float64(0)) + case "real": + return reflect.TypeOf(float64(0)) + case "text", "variant", "object", "array": + return reflect.TypeOf("") + case "date", "time", "timestamp_ltz", "timestamp_ntz", "timestamp_tz": + return reflect.TypeOf(time.Now()) + case "binary": + return reflect.TypeOf([]byte{}) + case "boolean": + return reflect.TypeOf(true) + } + glog.V(1).Infof("unsupported dbtype is specified. %v", dbtype) + glog.Flush() + return reflect.TypeOf("") +} + +// valueToString converts arbitrary golang type to a string. This is mainly used in binding data with placeholders +// in queries. 
+func valueToString(v driver.Value, tsmode string) (*string, error) { + glog.V(2).Infof("TYPE: %v, %v", reflect.TypeOf(v), reflect.ValueOf(v)) + if v == nil { + return nil, nil + } + v1 := reflect.ValueOf(v) + switch v1.Kind() { + case reflect.Bool: + s := strconv.FormatBool(v1.Bool()) + return &s, nil + case reflect.Int64: + s := strconv.FormatInt(v1.Int(), 10) + return &s, nil + case reflect.Float64: + s := strconv.FormatFloat(v1.Float(), 'g', -1, 32) + return &s, nil + case reflect.String: + s := v1.String() + return &s, nil + case reflect.Slice, reflect.Map: + if v1.IsNil() { + return nil, nil + } + if bd, ok := v.([]byte); ok { + if tsmode == "BINARY" { + s := hex.EncodeToString(bd) + return &s, nil + } + } + // TODO: is this good enough? + s := v1.String() + return &s, nil + case reflect.Struct: + if tm, ok := v.(time.Time); ok { + switch tsmode { + case "DATE": + _, offset := tm.Zone() + tm = tm.Add(time.Second * time.Duration(offset)) + s := fmt.Sprintf("%d", tm.Unix()*1000) + return &s, nil + case "TIME": + s := fmt.Sprintf("%d", + (tm.Hour()*3600+tm.Minute()*60+tm.Second())*1e9+tm.Nanosecond()) + return &s, nil + case "TIMESTAMP_NTZ", "TIMESTAMP_LTZ": + s := fmt.Sprintf("%d", tm.UnixNano()) + return &s, nil + case "TIMESTAMP_TZ": + _, offset := tm.Zone() + s := fmt.Sprintf("%v %v", tm.UnixNano(), offset/60+1440) + return &s, nil + } + } + } + return nil, fmt.Errorf("unsupported type: %v", v1.Kind()) +} + +// extractTimestamp extracts the internal timestamp data to epoch time in seconds and milliseconds +func extractTimestamp(srcValue *string) (sec int64, nsec int64, err error) { + glog.V(2).Infof("SRC: %v", srcValue) + var i int + for i = 0; i < len(*srcValue); i++ { + if (*srcValue)[i] == '.' { + sec, err = strconv.ParseInt((*srcValue)[0:i], 10, 64) + if err != nil { + return 0, 0, err + } + break + } + } + if i == len(*srcValue) { + // no fraction + sec, err = strconv.ParseInt(*srcValue, 10, 64) + if err != nil { + return 0, 0, err + } + nsec = 0 + } else { + s := (*srcValue)[i+1:] + nsec, err = strconv.ParseInt(s+strings.Repeat("0", 9-len(s)), 10, 64) + if err != nil { + return 0, 0, err + } + } + glog.V(2).Infof("sec: %v, nsec: %v", sec, nsec) + return sec, nsec, nil +} + +// stringToValue converts a pointer of string data to an arbitrary golang variable. This is mainly used in fetching +// data. 
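// Editor's illustration (not part of the upstream file): a raw "date" value is a
// day count since the Unix epoch, so "18262" decodes to 2020-01-01 UTC:
//
//	var dest driver.Value
//	raw := "18262"
//	_ = stringToValue(&dest, execResponseRowType{Type: "date"}, &raw)
//	// dest now holds time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)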
+func stringToValue(dest *driver.Value, srcColumnMeta execResponseRowType, srcValue *string) error { + if srcValue == nil { + glog.V(3).Infof("snowflake data type: %v, raw value: nil", srcColumnMeta.Type) + *dest = nil + return nil + } + glog.V(3).Infof("snowflake data type: %v, raw value: %v", srcColumnMeta.Type, *srcValue) + switch srcColumnMeta.Type { + case "text", "fixed", "real", "variant", "object": + *dest = *srcValue + return nil + case "date": + v, err := strconv.ParseInt(*srcValue, 10, 64) + if err != nil { + return err + } + *dest = time.Unix(v*86400, 0).UTC() + return nil + case "time": + sec, nsec, err := extractTimestamp(srcValue) + if err != nil { + return err + } + t0 := time.Time{} + *dest = t0.Add(time.Duration(sec*1e9 + nsec)) + return nil + case "timestamp_ntz": + sec, nsec, err := extractTimestamp(srcValue) + if err != nil { + return err + } + *dest = time.Unix(sec, nsec).UTC() + return nil + case "timestamp_ltz": + sec, nsec, err := extractTimestamp(srcValue) + if err != nil { + return err + } + *dest = time.Unix(sec, nsec) + return nil + case "timestamp_tz": + glog.V(2).Infof("tz: %v", *srcValue) + + tm := strings.Split(*srcValue, " ") + if len(tm) != 2 { + return &SnowflakeError{ + Number: ErrInvalidTimestampTz, + SQLState: SQLStateInvalidDataTimeFormat, + Message: fmt.Sprintf("invalid TIMESTAMP_TZ data. The value doesn't consist of two numeric values separated by a space: %v", *srcValue), + } + } + sec, nsec, err := extractTimestamp(&tm[0]) + if err != nil { + return err + } + offset, err := strconv.ParseInt(tm[1], 10, 64) + if err != nil { + return &SnowflakeError{ + Number: ErrInvalidTimestampTz, + SQLState: SQLStateInvalidDataTimeFormat, + Message: fmt.Sprintf("invalid TIMESTAMP_TZ data. The offset value is not integer: %v", tm[1]), + } + } + loc := Location(int(offset) - 1440) + tt := time.Unix(sec, nsec) + *dest = tt.In(loc) + return nil + case "binary": + b, err := hex.DecodeString(*srcValue) + if err != nil { + return &SnowflakeError{ + Number: ErrInvalidBinaryHexForm, + SQLState: SQLStateNumericValueOutOfRange, + Message: err.Error(), + } + } + *dest = b + return nil + } + *dest = *srcValue + return nil +} + +func arrayToString(v driver.Value) (string, []string) { + var t string + var arr []string + switch a := v.(type) { + case []int: + t = "FIXED" + for _, x := range a { + arr = append(arr, strconv.Itoa(x)) + } + case []int64: + t = "FIXED" + for _, x := range a { + arr = append(arr, strconv.Itoa(int(x))) + } + case []float64: + t = "REAL" + for _, x := range a { + arr = append(arr, fmt.Sprintf("%g", x)) + } + case []bool: + t = "BOOLEAN" + for _, x := range a { + arr = append(arr, strconv.FormatBool(x)) + } + case []string: + t = "TEXT" + arr = a + } + return t, arr +} + +var decimalShift = new(big.Int).Exp(big.NewInt(2), big.NewInt(64), nil) + +func intToBigFloat(val int64, scale int64) *big.Float { + f := new(big.Float).SetInt64(val) + s := new(big.Float).SetInt(new(big.Int).Exp(big.NewInt(10), big.NewInt(scale), nil)) + return new(big.Float).Quo(f, s) +} + +func decimalToBigInt(num decimal128.Num) *big.Int { + high := new(big.Int).SetInt64(num.HighBits()) + low := new(big.Int).SetUint64(num.LowBits()) + return new(big.Int).Add(new(big.Int).Mul(high, decimalShift), low) +} + +func decimalToBigFloat(num decimal128.Num, scale int64) *big.Float { + f := new(big.Float).SetInt(decimalToBigInt(num)) + s := new(big.Float).SetInt(new(big.Int).Exp(big.NewInt(10), big.NewInt(scale), nil)) + return new(big.Float).Quo(f, s) +} + +func stringIntToDecimal(src 
string) (decimal128.Num, bool) { + b, ok := new(big.Int).SetString(src, 10) + if !ok { + return decimal128.Num{}, ok + } + var high, low big.Int + high.QuoRem(b, decimalShift, &low) + return decimal128.New(high.Int64(), low.Uint64()), ok +} + +func stringFloatToDecimal(src string, scale int64) (decimal128.Num, bool) { + b, ok := new(big.Float).SetString(src) + if !ok { + return decimal128.Num{}, ok + } + s := new(big.Float).SetInt(new(big.Int).Exp(big.NewInt(10), big.NewInt(scale), nil)) + n := new(big.Float).Mul(b, s) + if !n.IsInt() { + return decimal128.Num{}, false + } + var high, low, z big.Int + n.Int(&z) + high.QuoRem(&z, decimalShift, &low) + return decimal128.New(high.Int64(), low.Uint64()), ok +} + +// Arrow Interface (Column) converter. This is called when Arrow chunks are downloaded to convert to the corresponding +// row type. +func arrowToValue(destcol *[]snowflakeValue, srcColumnMeta execResponseRowType, srcValue array.Interface) error { + data := srcValue.Data() + var err error + if len(*destcol) != srcValue.Data().Len() { + err = fmt.Errorf("array interface length mismatch") + } + glog.V(3).Infof("snowflake data type: %v, arrow data type: %v", srcColumnMeta.Type, srcValue.DataType()) + + switch strings.ToUpper(srcColumnMeta.Type) { + case "FIXED": + switch srcValue.DataType().ID() { + case arrow.DECIMAL: + for i, num := range array.NewDecimal128Data(data).Values() { + if !srcValue.IsNull(i) { + if srcColumnMeta.Scale == 0 { + (*destcol)[i] = decimalToBigInt(num) + } else { + (*destcol)[i] = decimalToBigFloat(num, srcColumnMeta.Scale) + } + } + } + case arrow.INT64: + for i, val := range array.NewInt64Data(data).Int64Values() { + if !srcValue.IsNull(i) { + if srcColumnMeta.Scale == 0 { + (*destcol)[i] = val + } else { + f := intToBigFloat(val, srcColumnMeta.Scale) + (*destcol)[i] = f + } + } + } + case arrow.INT32: + for i, val := range array.NewInt32Data(data).Int32Values() { + if !srcValue.IsNull(i) { + if srcColumnMeta.Scale == 0 { + (*destcol)[i] = int64(val) + } else { + f := intToBigFloat(int64(val), srcColumnMeta.Scale) + (*destcol)[i] = f + } + } + } + case arrow.INT16: + for i, val := range array.NewInt16Data(data).Int16Values() { + if !srcValue.IsNull(i) { + if srcColumnMeta.Scale == 0 { + (*destcol)[i] = int64(val) + } else { + f := intToBigFloat(int64(val), srcColumnMeta.Scale) + (*destcol)[i] = f + } + } + } + case arrow.INT8: + for i, val := range array.NewInt8Data(data).Int8Values() { + if !srcValue.IsNull(i) { + if srcColumnMeta.Scale == 0 { + (*destcol)[i] = int64(val) + } else { + f := intToBigFloat(int64(val), srcColumnMeta.Scale) + (*destcol)[i] = f + } + } + } + } + return err + case "BOOLEAN": + boolData := array.NewBooleanData(data) + for i := range *destcol { + if !srcValue.IsNull(i) { + (*destcol)[i] = boolData.Value(i) + } + } + return err + case "REAL": + for i, float64 := range array.NewFloat64Data(data).Float64Values() { + if !srcValue.IsNull(i) { + (*destcol)[i] = float64 + } + } + return err + case "TEXT", "ARRAY", "VARIANT", "OBJECT": + strings := array.NewStringData(data) + for i := range *destcol { + if !srcValue.IsNull(i) { + (*destcol)[i] = strings.Value(i) + } + } + return err + case "BINARY": + binaryData := array.NewBinaryData(data) + for i := range *destcol { + if !srcValue.IsNull(i) { + (*destcol)[i] = binaryData.Value(i) + } + } + return err + case "DATE": + for i, date32 := range array.NewDate32Data(data).Date32Values() { + if !srcValue.IsNull(i) { + t0 := time.Unix(int64(date32)*86400, 0).UTC() + (*destcol)[i] = t0 + } + } + 
return err + case "TIME": + if srcValue.DataType().ID() == arrow.INT64 { + for i, int64 := range array.NewInt64Data(data).Int64Values() { + if !srcValue.IsNull(i) { + t0 := time.Time{} + (*destcol)[i] = t0.Add(time.Duration(int64)) + } + } + } else { + for i, int32 := range array.NewInt32Data(data).Int32Values() { + if !srcValue.IsNull(i) { + t0 := time.Time{} + (*destcol)[i] = t0.Add(time.Duration(int64(int32) * int64(math.Pow10(9-int(srcColumnMeta.Scale))))) + } + } + } + return err + case "TIMESTAMP_NTZ": + if srcValue.DataType().ID() == arrow.STRUCT { + structData := array.NewStructData(data) + epoch := array.NewInt64Data(structData.Field(0).Data()).Int64Values() + fraction := array.NewInt32Data(structData.Field(1).Data()).Int32Values() + for i := range *destcol { + if !srcValue.IsNull(i) { + (*destcol)[i] = time.Unix(epoch[i], int64(fraction[i])).UTC() + } + } + } else { + for i, t := range array.NewInt64Data(data).Int64Values() { + if !srcValue.IsNull(i) { + (*destcol)[i] = time.Unix(0, t*int64(math.Pow10(9-int(srcColumnMeta.Scale)))).UTC() + } + } + } + return err + case "TIMESTAMP_LTZ": + if srcValue.DataType().ID() == arrow.STRUCT { + structData := array.NewStructData(data) + epoch := array.NewInt64Data(structData.Field(0).Data()).Int64Values() + fraction := array.NewInt32Data(structData.Field(1).Data()).Int32Values() + for i := range *destcol { + if !srcValue.IsNull(i) { + (*destcol)[i] = time.Unix(epoch[i], int64(fraction[i])) + } + } + } else { + for i, t := range array.NewInt64Data(data).Int64Values() { + if !srcValue.IsNull(i) { + q := t / int64(math.Pow10(int(srcColumnMeta.Scale))) + r := t % int64(math.Pow10(int(srcColumnMeta.Scale))) + (*destcol)[i] = time.Unix(q, r) + } + } + } + return err + case "TIMESTAMP_TZ": + structData := array.NewStructData(data) + if structData.NumField() == 2 { + epoch := array.NewInt64Data(structData.Field(0).Data()).Int64Values() + timezone := array.NewInt32Data(structData.Field(1).Data()).Int32Values() + for i := range *destcol { + if !srcValue.IsNull(i) { + loc := Location(int(timezone[i]) - 1440) + tt := time.Unix(epoch[i], 0) + (*destcol)[i] = tt.In(loc) + } + } + } else { + epoch := array.NewInt64Data(structData.Field(0).Data()).Int64Values() + fraction := array.NewInt32Data(structData.Field(1).Data()).Int32Values() + timezone := array.NewInt32Data(structData.Field(2).Data()).Int32Values() + for i := range *destcol { + if !srcValue.IsNull(i) { + loc := Location(int(timezone[i]) - 1440) + tt := time.Unix(epoch[i], int64(fraction[i])) + (*destcol)[i] = tt.In(loc) + } + } + } + return err + } + + err = fmt.Errorf("unsupported data type") + return err +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/datatype.go b/vendor/github.com/snowflakedb/gosnowflake/datatype.go new file mode 100644 index 000000000000..ee2450cce1b2 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/datatype.go @@ -0,0 +1,165 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "fmt" +) + +const ( + fixedType byte = iota + realType + textType + dateType + variantType + timestampLtzType + timestampNtzType + timestampTzType + objectType + arrayType + binaryType + timeType + booleanType +) + +var ( + // DataTypeFixed is a FIXED datatype. + DataTypeFixed = []byte{fixedType} + // DataTypeReal is a REAL datatype. + DataTypeReal = []byte{realType} + // DataTypeText is a TEXT datatype. 
+ DataTypeText = []byte{textType} + // DataTypeDate is a Date datatype. + DataTypeDate = []byte{dateType} + // DataTypeVariant is a TEXT datatype. + DataTypeVariant = []byte{variantType} + // DataTypeTimestampLtz is a TIMESTAMP_LTZ datatype. + DataTypeTimestampLtz = []byte{timestampLtzType} + // DataTypeTimestampNtz is a TIMESTAMP_NTZ datatype. + DataTypeTimestampNtz = []byte{timestampNtzType} + // DataTypeTimestampTz is a TIMESTAMP_TZ datatype. + DataTypeTimestampTz = []byte{timestampTzType} + // DataTypeObject is a OBJECT datatype. + DataTypeObject = []byte{objectType} + // DataTypeArray is a ARRAY datatype. + DataTypeArray = []byte{arrayType} + // DataTypeBinary is a BINARY datatype. + DataTypeBinary = []byte{binaryType} + // DataTypeTime is a TIME datatype. + DataTypeTime = []byte{timeType} + // DataTypeBoolean is a BOOLEAN datatype. + DataTypeBoolean = []byte{booleanType} +) + +// dataTypeMode returns the subsequent data type in a string representation. +func dataTypeMode(v driver.Value) (tsmode string, err error) { + if bd, ok := v.([]byte); ok { + switch { + case bytes.Equal(bd, DataTypeDate): + tsmode = "DATE" + case bytes.Equal(bd, DataTypeTime): + tsmode = "TIME" + case bytes.Equal(bd, DataTypeTimestampLtz): + tsmode = "TIMESTAMP_LTZ" + case bytes.Equal(bd, DataTypeTimestampNtz): + tsmode = "TIMESTAMP_NTZ" + case bytes.Equal(bd, DataTypeTimestampTz): + tsmode = "TIMESTAMP_TZ" + case bytes.Equal(bd, DataTypeBinary): + tsmode = "BINARY" + default: + return "", fmt.Errorf(errMsgInvalidByteArray, v) + } + } else { + return "", fmt.Errorf(errMsgInvalidByteArray, v) + } + return tsmode, nil +} + +// SnowflakeParameter includes the columns output from SHOW PARAMETER command. +type SnowflakeParameter struct { + Key string + Value string + Default string + Level string + Description string + SetByUser string + SetInJob string + SetOn string + SetByThreadID string + SetByThreadName string + SetByClass string + ParameterComment string + Type string + IsExpired string + ExpiresAt string + SetByControllingParameter string + ActivateVersion string + PartialRollout string + Unknown string // Reserve for added parameter +} + +func populateSnowflakeParameter(colname string, p *SnowflakeParameter) interface{} { + switch colname { + case "key": + return &p.Key + case "value": + return &p.Value + case "default": + return &p.Default + case "level": + return &p.Level + case "description": + return &p.Description + case "set_by_user": + return &p.SetByUser + case "set_in_job": + return &p.SetInJob + case "set_on": + return &p.SetOn + case "set_by_thread_id": + return &p.SetByThreadID + case "set_by_thread_name": + return &p.SetByThreadName + case "set_by_class": + return &p.SetByClass + case "parameter_comment": + return &p.ParameterComment + case "type": + return &p.Type + case "is_expired": + return &p.IsExpired + case "expires_at": + return &p.ExpiresAt + case "set_by_controlling_parameter": + return &p.SetByControllingParameter + case "activate_version": + return &p.ActivateVersion + case "partial_rollout": + return &p.PartialRollout + default: + debugPanicf("unknown type: %v", colname) + return &p.Unknown + } +} + +// ScanSnowflakeParameter binds SnowflakeParameter variable with an array of column buffer. 
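+// A minimal usage sketch (the SHOW PARAMETERS query and the db handle are
+// illustrative, not prescribed):
+//
+//	rows, err := db.Query("SHOW PARAMETERS")
+//	if err != nil { ... }
+//	defer rows.Close()
+//	for rows.Next() {
+//		p, err := ScanSnowflakeParameter(rows)
+//		if err != nil { ... }
+//		fmt.Println(p.Key, p.Value)
+//	}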
+func ScanSnowflakeParameter(rows *sql.Rows) (*SnowflakeParameter, error) {
+	var err error
+	var columns []string
+	columns, err = rows.Columns()
+	if err != nil {
+		return nil, err
+	}
+	colNum := len(columns)
+	p := SnowflakeParameter{}
+	cols := make([]interface{}, colNum)
+	for i := 0; i < colNum; i++ {
+		cols[i] = populateSnowflakeParameter(columns[i], &p)
+	}
+	err = rows.Scan(cols...)
+	return &p, err
+}
diff --git a/vendor/github.com/snowflakedb/gosnowflake/debug.go b/vendor/github.com/snowflakedb/gosnowflake/debug.go
new file mode 100644
index 000000000000..d416e102fc4e
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/debug.go
@@ -0,0 +1,9 @@
+// +build sfdebug
+
+package gosnowflake
+
+import "log"
+
+func debugPanicf(fmt string, args ...interface{}) {
+	log.Panicf(fmt, args...)
+}
diff --git a/vendor/github.com/snowflakedb/gosnowflake/doc.go b/vendor/github.com/snowflakedb/gosnowflake/doc.go
new file mode 100644
index 000000000000..a94abd33e6d1
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/doc.go
@@ -0,0 +1,551 @@
+/*
+Package gosnowflake is a pure Go Snowflake driver for the database/sql package.
+
+Clients can use the database/sql package directly. For example:
+
+	import (
+		"database/sql"
+
+		_ "github.com/snowflakedb/gosnowflake"
+	)
+
+	func main() {
+		db, err := sql.Open("snowflake", "user:password@myaccount/mydb")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer db.Close()
+		...
+	}
+
+Connection String
+
+Use the Open() function to create a database handle with connection parameters:
+
+	db, err := sql.Open("snowflake", "<DSN>")
+
+The Go Snowflake Driver supports the following connection syntaxes (or data source name (DSN) formats):
+
+	* username[:password]@accountname/dbname/schemaname[?param1=value&...&paramN=valueN]
+	* username[:password]@accountname/dbname[?param1=value&...&paramN=valueN]
+	* username[:password]@hostname:port/dbname/schemaname?account=<your_account>[&param1=value&...&paramN=valueN]
+
+where all parameters must be escaped; alternatively, use `Config` and `DSN` to construct a DSN string.
+
+The following example opens a database handle with the Snowflake account
+myaccount where the username is jsmith, password is mypassword, database is
+mydb, schema is testschema, and warehouse is mywh:
+
+	db, err := sql.Open("snowflake", "jsmith:mypassword@myaccount/mydb/testschema?warehouse=mywh")
+
+Connection Parameters
+
+The connection string (DSN) can contain both connection parameters (described below) and session parameters
+(https://docs.snowflake.com/en/sql-reference/parameters.html).
+
+The following connection parameters are supported:
+
+	* account <string>: Specifies the name of your Snowflake account, where <string> is the name
+	  assigned to your account by Snowflake. In the URL you received from
+	  Snowflake, your account name is the first segment in the domain (e.g.
+	  abc123 in https://abc123.snowflakecomputing.com). This parameter is
+	  optional if your account is specified after the @ character. If you
+	  are not on the us-west-2 region or AWS deployment, then append the region
+	  after the account name, e.g. “<account>.<region>”. If you are not on
+	  an AWS deployment, then append not only the region, but also the platform,
+	  e.g., “<account>.<region>.<platform>”. Account, region, and platform
+	  should be separated by a period (“.”), as shown above. If you are using
+	  a global URL, then append the connection group and "global",
+	  e.g., "<account>-<connection_group>.global". Account and connection group are
+	  separated by a dash ("-"), as shown above.
+
+	* region <string>: DEPRECATED. You may specify a region, such as
+	  “eu-central-1”, with this parameter. However, since this parameter
+	  is deprecated, it is best to specify the region as part of the
+	  account parameter. For details, see the description of the account
+	  parameter.
+
+	* database: Specifies the database to use by default in the client session
+	  (can be changed after login).
+
+	* schema: Specifies the database schema to use by default in the client
+	  session (can be changed after login).
+
+	* warehouse: Specifies the virtual warehouse to use by default for queries,
+	  loading, etc. in the client session (can be changed after login).
+
+	* role: Specifies the role to use by default for accessing Snowflake
+	  objects in the client session (can be changed after login).
+
+	* passcode: Specifies the passcode provided by Duo when using multi-factor authentication (MFA) for login.
+
+	* passcodeInPassword: false by default. Set to true if the MFA passcode is
+	  embedded in the login password. Appends the MFA passcode to the end of the
+	  password.
+
+	* loginTimeout: Specifies the timeout, in seconds, for login. The default
+	  is 60 seconds. The login request gives up after the timeout length if the
+	  HTTP response is not successful.
+
+	* authenticator: Specifies the authenticator to use for authenticating user credentials:
+	  - To use the internal Snowflake authenticator, specify snowflake (Default).
+	  - To authenticate through Okta, specify https://<okta_account_name>.okta.com (URL prefix for Okta).
+	  - To authenticate using your IDP via a browser, specify externalbrowser.
+	  - To authenticate via OAuth, specify oauth and provide an OAuth Access Token (see the token parameter below).
+
+	* application: Identifies your application to Snowflake Support.
+
+	* insecureMode: false by default. Set to true to bypass the Online
+	  Certificate Status Protocol (OCSP) certificate revocation check.
+	  IMPORTANT: Change the default value for testing or emergency situations only.
+
+	* token: a token that can be used to authenticate. Should be used in conjunction with the "oauth" authenticator.
+
+	* client_session_keep_alive: Set to true to have a heartbeat in the background every hour to keep the connection alive,
+	  so that the connection session never expires. Use this option with care, as it keeps the
+	  access open for as long as the process is alive.
+
+	* ocspFailOpen: true by default. Set to false to make the OCSP check run in fail-closed mode.
+
+	* validateDefaultParameters: true by default. Set to false to disable the existence and privilege checks for
+	  the Database, Schema, Warehouse and Role when setting up the connection.
+
+All other parameters are interpreted as session parameters (https://docs.snowflake.com/en/sql-reference/parameters.html).
+For example, the TIMESTAMP_OUTPUT_FORMAT session parameter can be set by adding:
+
+	...&TIMESTAMP_OUTPUT_FORMAT=MM-DD-YYYY...
+
+A complete connection string looks similar to the following:
+
+	my_user_name:my_password@ac123456/my_database/my_schema?warehouse=inventory_warehouse&role=my_user_role&DATE_OUTPUT_FORMAT=YYYY-MM-DD
+	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+	connection                                              connection                                      session
+	parameter                                               parameter                                       parameter
+
+Session-level parameters can also be set by using the SQL command "ALTER SESSION"
+(https://docs.snowflake.com/en/sql-reference/sql/alter-session.html).
+
+Proxy
+
+The Go Snowflake Driver honors the environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY for the forward proxy setting.
+
+NO_PROXY specifies which hostname endings should be allowed to bypass the proxy server, e.g.
+"no_proxy=.amazonaws.com" means that AWS S3 access does not need to go through the proxy.
+
+NO_PROXY does not support wildcards. Each value specified should be one of the following:
+
+	* The end of a hostname (or a complete hostname), for example: ".amazonaws.com" or "xy12345.snowflakecomputing.com".
+
+	* An IP address, for example "192.196.1.15".
+
+If more than one value is specified, values should be separated by commas, for example:
+
+	no_proxy=localhost,.my_company.com,xy12345.snowflakecomputing.com,192.168.1.15,192.168.1.16
+
+
+Logging
+
+By default, the driver's built-in logger is a no-op; no output is generated. This is
+intentional, so that applications that use the same set of logger parameters do
+not conflict with glog, which is incorporated in the driver's logging
+framework.
+
+In order to enable debug logging for the driver, add the build tag sfdebug to the
+go tool command line, for example:
+
+	go build -tags=sfdebug
+
+In your application, you will need to import the "flag" module and include code
+to enable the logging. For example:
+
+	if !flag.Parsed() {
+		// enable glog for Go Snowflake Driver
+		flag.Parse()
+	}
+
+For tests, run the test command with the tag along with glog parameters. For
+example, the following command will generate all activity logs in the standard
+error:
+
+	go test -tags=sfdebug -v . -vmodule=*=2 -stderrthreshold=INFO
+
+Likewise, if you build your application with the tag, you may specify the same
+set of glog parameters:
+
+	your_go_program -vmodule=*=2 -stderrthreshold=INFO
+
+Using the -stderrthreshold option will result in logging being shown in the STDERR
+of the executing shell. If you wish to have the logging in a file, then you may use
+the -log_dir option and give it a path to a directory where log files will be made:
+
+	your_go_program -vmodule=*=2 -log_dir=/path/to/logs
+
+The -stderrthreshold option and the -log_dir option may also be used at the same time,
+and the log data will be put in both places:
+
+	your_go_program -vmodule=*=2 -stderrthreshold=INFO -log_dir=/path/to/logs
+
+To get the logs for a specific module, use the -vmodule option. For example, to
+retrieve the driver.go and connection.go module logs:
+
+	your_go_program -vmodule=driver=2,connection=2 -stderrthreshold=INFO
+
+Note: If your request retrieves no logs, call db.Close() or glog.Flush() to flush the glog buffer.
+
+Note: The logger may be changed in the future for better logging. Currently, if
+the application uses the same parameters as glog, you cannot collect both
+application and driver logs at the same time.
+
+Canceling Query by CtrlC
+
+As of 0.5.0, signal handling responsibility has moved to the applications. If you want to cancel a
+query/command by Ctrl+C, add an os.Interrupt trap in the context passed to methods that accept a context parameter,
+e.g. QueryContext, ExecContext.
+
+	// handle interrupt signal
+	ctx, cancel := context.WithCancel(context.Background())
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+	defer func() {
+		signal.Stop(c)
+	}()
+	go func() {
+		select {
+		case <-c:
+			cancel()
+		case <-ctx.Done():
+		}
+	}()
+	... (connection)
+	// execute a query
+	rows, err := db.QueryContext(ctx, query)
+	... (Ctrl+C to cancel the query)
+
+See cmd/selectmany.go for the full example.
+
+Supported Data Types
+
+Queries return SQL column type information in the ColumnType type. The
+DatabaseTypeName method returns strings representing Snowflake data types.
+The following table shows those strings, the corresponding Snowflake data
+type, and the corresponding Golang native data type. The columns are:
+
+	1. The string representation of the data type.
+	2. The SQL data type.
+	3. The default Golang data type that is returned when you use snowflakeRows.Scan() to read data from
+	   JSON data format via an interface{}. (All returned values are JSON strings.)
+	4. The standard Golang data type that is returned when you use snowflakeRows.Scan() to read data from
+	   JSON data format directly.
+	5. Footnote numbers.
+
+This table shows the data types:
+
+	==========================================================================================
+	                |                            | Default Go  | Supported |
+	String          |                            | Data Type   | Go Data   |
+	Representation  | Snowflake Data Type        | for Scan()  | Types for | Footnotes
+	                |                            | interface{} | Scan()    |
+	                |                            | (JSON)      | (JSON)    |
+	==========================================================================================
+	BOOLEAN         | BOOLEAN                    | string      | bool      |
+	TEXT            | VARCHAR/STRING             | string      | string    |
+	REAL            | REAL/DOUBLE                | string      | float64   | [1] [2]
+	FIXED           | INTEGER that fits in int64 | string      | int64     | [1] [2]
+	FIXED           | NUMBER(P, S) where S > 0   | string      |           | [1] [3]
+	DATE            | DATE                       | string      | time.Time |
+	TIME            | TIME                       | string      | time.Time |
+	TIMESTAMP_LTZ   | TIMESTAMP_LTZ              | string      | time.Time |
+	TIMESTAMP_NTZ   | TIMESTAMP_NTZ              | string      | time.Time |
+	TIMESTAMP_TZ    | TIMESTAMP_TZ               | string      | time.Time |
+	BINARY          | BINARY                     | string      | []byte    |
+	ARRAY           | ARRAY                      | string      | string    |
+	OBJECT          | OBJECT                     | string      | string    |
+	VARIANT         | VARIANT                    | string      | string    |
+
+Footnotes:
+
+	[1] Converting from a higher precision data type to a lower precision data type via the snowflakeRows.Scan() method can lose low bits (lose precision), lose high bits (completely change the value), or result in error.
+
+	[2] Attempting to convert from a higher precision data type to a lower precision data type via interface{} causes an error.
+
+	[3] If the value in Snowflake is too large to fit into the corresponding Golang data type, then conversion can return either an int64 with the high bits truncated or an error.
+
+Note: SQL NULL values are converted to Golang nil values, and vice-versa.
+
+Binding Parameters to Array Variables For Batch Inserts
+
+Version 1.3.9 (and later) of the Go Snowflake Driver supports the ability to bind an array variable to a parameter in an SQL
+INSERT statement. You can use this technique to insert multiple rows in a single batch.
+
+As an example, the following code inserts rows into a table that contains integer, float, boolean, and string columns. The example
+binds arrays to the parameters in the INSERT statement.
+
+	// Create a table containing an integer, float, boolean, and string column.
+	_, err = db.Exec("create or replace table my_table(c1 int, c2 float, c3 boolean, c4 string)")
+	...
+	// Define the arrays containing the data to insert.
+	intArray := []int{1, 2, 3}
+	fltArray := []float64{0.1, 2.34, 5.678}
+	boolArray := []bool{true, false, true}
+	strArray := []string{"test1", "test2", "test3"}
+	...
+	// Insert the data from the arrays into the table.
+	_, err = db.Exec("insert into my_table values (?, ?, ?, ?)", intArray, fltArray, boolArray, strArray)
+
+Note: For alternative ways to load data into the Snowflake database (including bulk loading using the COPY command), see
+Loading Data Into Snowflake (https://docs.snowflake.com/en/user-guide-data-load.html).
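+
+To read the batch back with typed destinations, the same table can be queried
+and scanned following the data type table above (a sketch; it assumes the
+my_table created in the preceding example):
+
+	rows, err := db.Query("select c1, c2, c3, c4 from my_table")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer rows.Close()
+	for rows.Next() {
+		var (
+			c1 int64   // FIXED with scale 0 scans into int64
+			c2 float64 // REAL scans into float64
+			c3 bool    // BOOLEAN scans into bool
+			c4 string  // TEXT scans into string
+		)
+		if err := rows.Scan(&c1, &c2, &c3, &c4); err != nil {
+			log.Fatal(err)
+		}
+	}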
+
+Binding a Parameter to a Time Type
+
+Go's database/sql package supports the ability to bind a parameter in an SQL statement to a time.Time variable.
+However, when the client binds data to send to the server, the driver cannot determine the correct Snowflake date/timestamp data
+type to associate with the binding parameter. For example:
+
+	dbt.mustExec("CREATE OR REPLACE TABLE tztest (id int, ntz timestamp_ntz, ltz timestamp_ltz)")
+	// ...
+	stmt, err := dbt.db.Prepare("INSERT INTO tztest(id,ntz,ltz) VALUES(1, ?, ?)")
+	// ...
+	tmValue := time.Now()
+	// ... Is tmValue a TIMESTAMP_NTZ or TIMESTAMP_LTZ?
+	_, err = stmt.Exec(tmValue, tmValue)
+
+To resolve this issue, a binding parameter flag is introduced that associates
+any subsequent time.Time type to the DATE, TIME, TIMESTAMP_LTZ, TIMESTAMP_NTZ
+or BINARY data type. The above example could be rewritten as follows:
+
+	import (
+		sf "github.com/snowflakedb/gosnowflake"
+	)
+	dbt.mustExec("CREATE OR REPLACE TABLE tztest (id int, ntz timestamp_ntz, ltz timestamp_ltz)")
+	// ...
+	stmt, err := dbt.db.Prepare("INSERT INTO tztest(id,ntz,ltz) VALUES(1, ?, ?)")
+	// ...
+	tmValue := time.Now()
+	// ...
+	_, err = stmt.Exec(sf.DataTypeTimestampNtz, tmValue, sf.DataTypeTimestampLtz, tmValue)
+
+Timestamps with Time Zones
+
+The driver fetches TIMESTAMP_TZ (timestamp with time zone) data using the
+offset-based Location types, which represent a collection of time offsets in
+use in a geographical area, such as CET (Central European Time) or UTC
+(Coordinated Universal Time). The offset-based Location data is generated and
+cached when a Go Snowflake Driver application starts, and if the given offset
+is not in the cache, it is generated dynamically.
+
+Currently, Snowflake doesn't support the name-based Location types, e.g.,
+"America/Los_Angeles".
+
+For more information about Location types, see the Go documentation for https://golang.org/pkg/time/#Location.
+
+Binary Data
+
+Internally, this feature leverages the []byte data type. As a result, BINARY
+data cannot be bound without the binding parameter flag. In the following
+example, sf is an alias for the gosnowflake package:
+
+	var b = []byte{0x01, 0x02, 0x03}
+	_, err = stmt.Exec(sf.DataTypeBinary, b)
+
+Maximum Number of Result Set Chunk Downloaders
+
+The driver directly downloads a result set from the cloud storage if the size is large. This
+shifts workloads from the Snowflake database to the clients for scale. The download is handled asynchronously
+by goroutines named "Chunk Downloader", so that the driver can fetch the next result set while the application
+consumes the current result set.
+
+The application may change the number of result set chunk downloaders if required. Note this doesn't help reduce
+memory footprint by itself. Consider Custom JSON Decoder.
+
+	import (
+		sf "github.com/snowflakedb/gosnowflake"
+	)
+	sf.MaxChunkDownloadWorkers = 2
+
+
+Experimental: Custom JSON Decoder for Parsing Result Set
+
+The application may have the driver use a custom JSON decoder that incrementally parses the result set as follows.
+
+	import (
+		sf "github.com/snowflakedb/gosnowflake"
+	)
+	sf.CustomJSONDecoderEnabled = true
+	...
+
+This option will reduce the memory footprint to half or even a quarter, but it can significantly degrade
+performance depending on the environment. The test cases running on a Travis Ubuntu box show about one-fifth the
+memory footprint while being roughly four times slower. Be cautious when using this option.
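+
+Returning to the binding parameter flags, a timestamp and binary data can be
+bound in the same statement (a sketch; the table t and its columns are assumed
+for illustration only):
+
+	// each flag applies to the time.Time or []byte argument that follows it
+	_, err = db.Exec(
+		"INSERT INTO t(ntz, b) VALUES(?, ?)",
+		sf.DataTypeTimestampNtz, time.Now(),
+		sf.DataTypeBinary, []byte{0x01, 0x02, 0x03},
+	)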
+
+JWT authentication
+
+The Go Snowflake Driver supports JWT (JSON Web Token) authentication.
+
+To enable this feature, construct the DSN with fields "authenticator=SNOWFLAKE_JWT&privateKey=<your_private_key>",
+or use a Config structure specifying:
+
+	config := &Config{
+		...
+		Authenticator: "SNOWFLAKE_JWT",
+		PrivateKey:    "<your_private_key>",
+	}
+
+The <your_private_key> should be a base64 URL encoded PKCS8 rsa private key string. One way to encode a byte slice to URL
+base 64 URL format is through the base64.URLEncoding.EncodeToString() function.
+
+On the server side, you can alter the public key with the SQL command:
+
+	ALTER USER <your_user_name> SET RSA_PUBLIC_KEY='<your_public_key>';
+
+The <your_public_key> should be a base64 Standard encoded PKI public key string. One way to encode a byte slice to base
+64 Standard format is through the base64.StdEncoding.EncodeToString() function.
+
+To generate a valid key pair, you can execute the following commands in the shell:
+
+	# generate 2048-bit pkcs8 encoded RSA private key
+	openssl genpkey -algorithm RSA \
+		-pkeyopt rsa_keygen_bits:2048 \
+		-pkeyopt rsa_keygen_pubexp:65537 | \
+		openssl pkcs8 -topk8 -outform der > rsa-2048-private-key.p8
+
+	# extract 2048-bit PKI encoded RSA public key from the private key
+	openssl pkey -pubout -inform der -outform der \
+		-in rsa-2048-private-key.p8 \
+		-out rsa-2048-public-key.spki
+
+Note: As of February 2020, Golang's official library does not support passcode-encrypted PKCS8 private keys.
+For security purposes, Snowflake highly recommends that you store the passcode-encrypted private key on disk and
+decrypt the key in your application using a library you trust.
+
+
+Executing Multiple Statements in One Call
+
+This feature is available in version 1.3.8 or later of the driver.
+
+By default, Snowflake returns an error for queries issued with multiple statements.
+This restriction helps protect against SQL Injection attacks (https://en.wikipedia.org/wiki/SQL_injection).
+
+The multi-statement feature allows users to skip this restriction and execute multiple SQL statements through a
+single Golang function call. However, this opens up the possibility of SQL injection, so it should be used carefully.
+The risk can be reduced by specifying the exact number of statements to be executed, which makes it more difficult to
+inject a statement by appending one. More details are below.
+
+The Go Snowflake Driver provides two functions that can execute multiple SQL statements in a single call:
+
+	* db.QueryContext(): This function is used to execute queries, such as SELECT statements, that return a result set.
+	* db.ExecContext(): This function is used to execute statements that don't return a result set (i.e. most DML and DDL statements).
+
+To compose a multi-statement query, simply create a string that contains all the queries, separated by semicolons,
+in the order in which the statements should be executed.
+
+To protect against SQL Injection attacks while using the multi-statement feature, pass a Context that specifies
+the number of statements in the string. For example:
+
+	import (
+		"context"
+		"database/sql"
+	)
+
+	var multi_statement_query = "SELECT c1 FROM t1; SELECT c2 FROM t2"
+	var number_of_statements = 2
+	blank_context := context.Background()
+	multi_statement_context, _ := WithMultiStatement(blank_context, number_of_statements)
+	rows, err := db.QueryContext(multi_statement_context, multi_statement_query)
+
+When multiple queries are executed by a single call to QueryContext(), multiple result sets are returned. After
+you process the first result set, get the next result set (for the next SQL statement) by calling NextResultSet().
+
+The following pseudo-code shows how to process multiple result sets:
+
+	Execute the statement and get the result set(s):
+
+		rows, err := db.QueryContext(ctx, multiStmtQuery)
+
+	Retrieve the rows in the first query's result set:
+
+		while rows.Next() {
+			err = rows.Scan(&variable_1)
+			if err != nil {
+				t.Errorf("failed to scan: %#v", err)
+			}
+			...
+		}
+
+	Retrieve the remaining result sets and the rows in them:
+
+		while rows.NextResultSet() {
+
+			while rows.Next() {
+				...
+			}
+
+		}
+
+The function db.ExecContext() returns a single result, which is the sum of the results of the individual statements.
+For example, if your multi-statement query executed two UPDATE statements, each of which updated 10 rows,
+then the result returned would be 20. Individual results for individual statements are not available.
+
+The following code shows how to retrieve the result of a multi-statement query executed through db.ExecContext():
+
+	Execute the SQL statements:
+
+		res, err := db.ExecContext(ctx, multiStmtQuery)
+
+	Get the summed result and store it in the variable named count:
+
+		count, err := res.RowsAffected()
+
+Note: Because a multi-statement ExecContext() returns a single value, you cannot detect offsetting errors.
+For example, suppose you expected the return value to be 20 because you expected each UPDATE statement to
+update 10 rows. If one UPDATE statement updated 15 rows and the other UPDATE statement updated only 5
+rows, the total would still be 20. You would see no indication that the UPDATE statements had not functioned as
+expected.
+
+The ExecContext() function does not return an error if passed a query (e.g. a SELECT statement). However, it
+still returns only a single value, not a result set, so using it to execute queries (or a mix of queries and non-query
+statements) is impractical.
+
+The QueryContext() function does not return an error if passed non-query statements (e.g. DML). The function
+returns a result set for each statement, whether or not the statement is a query. For each non-query statement, the
+result set contains a single row that contains a single column; the value is the number of rows changed by the
+statement.
+
+If you want to execute a mix of query and non-query statements (e.g. a mix of SELECT and DML statements) in a
+multi-statement query, use QueryContext(). You can retrieve the result sets for the queries,
+and you can retrieve or ignore the row counts for the non-query statements.
+
+If any of the SQL statements fails to compile or execute, execution is aborted. Any statements that ran before it are unaffected.
+
+For example, if the statements below are run as one multi-statement query, the multi-statement query fails on the
+third statement, and an error is returned.
+
+	CREATE OR REPLACE TABLE test(n int);
+	INSERT INTO TEST VALUES (1), (2);
+	INSERT INTO TEST VALUES ('not_an_integer');  -- execution fails here
+	INSERT INTO TEST VALUES (3);
+
+If you then query the contents of the table named "test", the values 1 and 2 would be present.
+
+When using the QueryContext() and ExecContext() functions, Golang code can check for errors the usual way. For
+example:
+
+	rows, err := db.QueryContext(ctx, multiStmtQuery)
+	if err != nil {
+		log.Fatalf("failed to query multiple statements: %v", err)
+	}
+
+Preparing statements and using bind variables are also not supported for multi-statement queries.
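+
+As a concrete version of the pseudo-code above, the result-set loop can be
+written as follows (a sketch; multiStmtQuery is assumed to contain only
+single-column SELECT statements):
+
+	rows, err := db.QueryContext(ctx, multiStmtQuery)
+	if err != nil {
+		log.Fatalf("failed to run the multi-statement query: %v", err)
+	}
+	defer rows.Close()
+	for {
+		for rows.Next() {
+			var v string
+			if err := rows.Scan(&v); err != nil {
+				log.Fatalf("failed to scan: %v", err)
+			}
+		}
+		if !rows.NextResultSet() {
+			break
+		}
+	}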
+ + +Limitations + +GET and PUT operations are unsupported. +*/ +package gosnowflake diff --git a/vendor/github.com/snowflakedb/gosnowflake/driver.go b/vendor/github.com/snowflakedb/gosnowflake/driver.go new file mode 100644 index 000000000000..a9982fa63d84 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/driver.go @@ -0,0 +1,112 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "context" + "database/sql" + "database/sql/driver" + "net/http" +) + +// SnowflakeDriver is a context of Go Driver +type SnowflakeDriver struct{} + +// Open creates a new connection. +func (d SnowflakeDriver) Open(dsn string) (driver.Conn, error) { + glog.V(2).Info("Open") + var err error + sc := &snowflakeConn{ + SequenceCounter: 0, + } + ctx := context.TODO() + sc.cfg, err = ParseDSN(dsn) + if err != nil { + sc.cleanup() + return nil, err + } + st := SnowflakeTransport + if sc.cfg.InsecureMode { + // no revocation check with OCSP. Think twice when you want to enable this option. + st = snowflakeInsecureTransport + } else { + // set OCSP fail open mode + ocspResponseCacheLock.Lock() + ocspFailOpen = sc.cfg.OCSPFailOpen + ocspResponseCacheLock.Unlock() + } + // authenticate + sc.rest = &snowflakeRestful{ + Host: sc.cfg.Host, + Port: sc.cfg.Port, + Protocol: sc.cfg.Protocol, + Client: &http.Client{ + // request timeout including reading response body + Timeout: defaultClientTimeout, + Transport: st, + }, + LoginTimeout: sc.cfg.LoginTimeout, + RequestTimeout: sc.cfg.RequestTimeout, + FuncPost: postRestful, + FuncGet: getRestful, + FuncPostQuery: postRestfulQuery, + FuncPostQueryHelper: postRestfulQueryHelper, + FuncRenewSession: renewRestfulSession, + FuncPostAuth: postAuth, + FuncCloseSession: closeSession, + FuncCancelQuery: cancelQuery, + FuncPostAuthSAML: postAuthSAML, + FuncPostAuthOKTA: postAuthOKTA, + FuncGetSSO: getSSO, + } + var authData *authResponseMain + var samlResponse []byte + var proofKey []byte + + glog.V(2).Infof("Authenticating via %v", sc.cfg.Authenticator.String()) + switch sc.cfg.Authenticator { + case AuthTypeExternalBrowser: + samlResponse, proofKey, err = authenticateByExternalBrowser( + ctx, + sc.rest, + sc.cfg.Authenticator.String(), + sc.cfg.Application, + sc.cfg.Account, + sc.cfg.User, + sc.cfg.Password) + if err != nil { + sc.cleanup() + return nil, err + } + case AuthTypeOkta: + samlResponse, err = authenticateBySAML( + ctx, + sc.rest, + sc.cfg.OktaURL, + sc.cfg.Application, + sc.cfg.Account, + sc.cfg.User, + sc.cfg.Password) + if err != nil { + sc.cleanup() + return nil, err + } + } + authData, err = authenticate( + ctx, + sc, + samlResponse, + proofKey) + if err != nil { + sc.cleanup() + return nil, err + } + + sc.populateSessionParameters(authData.Parameters) + sc.startHeartBeat() + return sc, nil +} + +func init() { + sql.Register("snowflake", &SnowflakeDriver{}) +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/dsn.go b/vendor/github.com/snowflakedb/gosnowflake/dsn.go new file mode 100644 index 000000000000..becd6e84514b --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/dsn.go @@ -0,0 +1,604 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. 
+ +package gosnowflake + +import ( + "crypto/rsa" + "encoding/base64" + "fmt" + "net/url" + "strconv" + "strings" + "time" +) + +const ( + defaultClientTimeout = 900 * time.Second // Timeout for network round trip + read out http response + defaultLoginTimeout = 60 * time.Second // Timeout for retry for login EXCLUDING clientTimeout + defaultRequestTimeout = 0 * time.Second // Timeout for retry for request EXCLUDING clientTimeout + defaultJWTTimeout = 60 * time.Second + defaultDomain = ".snowflakecomputing.com" +) + +// ConfigBool is a type to represent true or false in the Config +type ConfigBool uint8 + +const ( + configBoolNotSet ConfigBool = iota // Reserved for unset to let default value fall into this category + // ConfigBoolTrue represents true for the config field + ConfigBoolTrue + // ConfigBoolFalse represents false for the config field + ConfigBoolFalse +) + +// Config is a set of configuration parameters +type Config struct { + Account string // Account name + User string // Username + Password string // Password (requires User) + Database string // Database name + Schema string // Schema + Warehouse string // Warehouse + Role string // Role + Region string // Region + + // ValidateDefaultParameters disable the validation checks for Database, Schema, Warehouse and Role + // at the time a connection is established + ValidateDefaultParameters ConfigBool + + Params map[string]*string // other connection parameters + + Protocol string // http or https (optional) + Host string // hostname (optional) + Port int // port (optional) + + Authenticator AuthType // The authenticator type + + Passcode string + PasscodeInPassword bool + + OktaURL *url.URL + + LoginTimeout time.Duration // Login retry timeout EXCLUDING network roundtrip and read out http response + RequestTimeout time.Duration // request retry timeout EXCLUDING network roundtrip and read out http response + JWTExpireTimeout time.Duration // JWT expire after timeout + + Application string // application name. + InsecureMode bool // driver doesn't check certificate revocation status + OCSPFailOpen OCSPFailOpenMode // OCSP Fail Open + + Token string // Token to use for OAuth other forms of token based auth + + PrivateKey *rsa.PrivateKey // Private key used to sign JWT +} + +// ocspMode returns the OCSP mode in string INSECURE, FAIL_OPEN, FAIL_CLOSED +func (c *Config) ocspMode() string { + if c.InsecureMode { + return ocspModeInsecure + } else if c.OCSPFailOpen == ocspFailOpenNotSet || c.OCSPFailOpen == OCSPFailOpenTrue { + // by default or set to true + return ocspModeFailOpen + } + return ocspModeFailClosed +} + +// DSN constructs a DSN for Snowflake db. +func DSN(cfg *Config) (dsn string, err error) { + hasHost := true + if cfg.Host == "" { + hasHost = false + if cfg.Region == "us-west-2" { + cfg.Region = "" + } + if cfg.Region == "" { + cfg.Host = cfg.Account + defaultDomain + } else { + cfg.Host = cfg.Account + "." 
+ cfg.Region + defaultDomain + } + } + // in case account includes region + posDot := strings.Index(cfg.Account, ".") + if posDot > 0 { + if cfg.Region != "" { + return "", ErrInvalidRegion + } + cfg.Region = cfg.Account[posDot+1:] + cfg.Account = cfg.Account[:posDot] + } + err = fillMissingConfigParameters(cfg) + if err != nil { + return "", err + } + params := &url.Values{} + if hasHost && cfg.Account != "" { + // account may not be included in a Host string + params.Add("account", cfg.Account) + } + if cfg.Database != "" { + params.Add("database", cfg.Database) + } + if cfg.Schema != "" { + params.Add("schema", cfg.Schema) + } + if cfg.Warehouse != "" { + params.Add("warehouse", cfg.Warehouse) + } + if cfg.Role != "" { + params.Add("role", cfg.Role) + } + if cfg.Region != "" { + params.Add("region", cfg.Region) + } + if cfg.Authenticator != AuthTypeSnowflake { + if cfg.Authenticator == AuthTypeOkta { + params.Add("authenticator", strings.ToLower(cfg.OktaURL.String())) + } else { + params.Add("authenticator", strings.ToLower(cfg.Authenticator.String())) + } + } + if cfg.Passcode != "" { + params.Add("passcode", cfg.Passcode) + } + if cfg.PasscodeInPassword { + params.Add("passcodeInPassword", strconv.FormatBool(cfg.PasscodeInPassword)) + } + if cfg.LoginTimeout != defaultLoginTimeout { + params.Add("loginTimeout", strconv.FormatInt(int64(cfg.LoginTimeout/time.Second), 10)) + } + if cfg.RequestTimeout != defaultRequestTimeout { + params.Add("requestTimeout", strconv.FormatInt(int64(cfg.RequestTimeout/time.Second), 10)) + } + if cfg.JWTExpireTimeout != defaultJWTTimeout { + params.Add("jwtTimeout", strconv.FormatInt(int64(cfg.JWTExpireTimeout/time.Second), 10)) + } + if cfg.Application != clientType { + params.Add("application", cfg.Application) + } + if cfg.Protocol != "" && cfg.Protocol != "https" { + params.Add("protocol", cfg.Protocol) + } + if cfg.Token != "" { + params.Add("token", cfg.Token) + } + if cfg.Params != nil { + for k, v := range cfg.Params { + params.Add(k, *v) + } + } + if cfg.PrivateKey != nil { + privateKeyInBytes, err := marshalPKCS8PrivateKey(cfg.PrivateKey) + if err != nil { + return "", err + } + keyBase64 := base64.URLEncoding.EncodeToString(privateKeyInBytes) + params.Add("privateKey", keyBase64) + } + if cfg.InsecureMode { + params.Add("insecureMode", strconv.FormatBool(cfg.InsecureMode)) + } + + params.Add("ocspFailOpen", strconv.FormatBool(cfg.OCSPFailOpen != OCSPFailOpenFalse)) + + params.Add("validateDefaultParameters", strconv.FormatBool(cfg.ValidateDefaultParameters != ConfigBoolFalse)) + + dsn = fmt.Sprintf("%v:%v@%v:%v", url.QueryEscape(cfg.User), url.QueryEscape(cfg.Password), cfg.Host, cfg.Port) + if params.Encode() != "" { + dsn += "?" + params.Encode() + } + return +} + +// ParseDSN parses the DSN string to a Config. 
+func ParseDSN(dsn string) (cfg *Config, err error) { + // New config with some default values + cfg = &Config{ + Params: make(map[string]*string), + Authenticator: AuthTypeSnowflake, // Default to snowflake + } + + // user[:password]@account/database/schema[?param1=value1¶mN=valueN] + // or + // user[:password]@account/database[?param1=value1¶mN=valueN] + // or + // user[:password]@host:port/database/schema?account=user_account[?param1=value1¶mN=valueN] + // or + // host:port/database/schema?account=user_account[?param1=value1¶mN=valueN] + + foundSlash := false + secondSlash := false + done := false + var i int + posQuestion := len(dsn) + for i = len(dsn) - 1; i >= 0; i-- { + switch { + case dsn[i] == '/': + foundSlash = true + + // left part is empty if i <= 0 + var j int + posSecondSlash := i + if i > 0 { + for j = i - 1; j >= 0; j-- { + switch { + case dsn[j] == '/': + // second slash + secondSlash = true + posSecondSlash = j + case dsn[j] == '@': + // username[:password]@... + cfg.User, cfg.Password = parseUserPassword(j, dsn) + } + if dsn[j] == '@' { + break + } + } + + // account or host:port + err = parseAccountHostPort(cfg, j, posSecondSlash, dsn) + if err != nil { + return nil, err + } + } + // [?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + err = parseParams(cfg, i, dsn) + if err != nil { + return + } + if secondSlash { + cfg.Database = dsn[posSecondSlash+1 : i] + cfg.Schema = dsn[i+1 : posQuestion] + } else { + cfg.Database = dsn[posSecondSlash+1 : posQuestion] + } + done = true + case dsn[i] == '?': + posQuestion = i + } + if done { + break + } + } + if !foundSlash { + // no db or schema is specified + var j int + for j = len(dsn) - 1; j >= 0; j-- { + switch { + case dsn[j] == '@': + cfg.User, cfg.Password = parseUserPassword(j, dsn) + case dsn[j] == '?': + posQuestion = j + } + if dsn[j] == '@' { + break + } + } + err = parseAccountHostPort(cfg, j, posQuestion, dsn) + if err != nil { + return nil, err + } + err = parseParams(cfg, posQuestion-1, dsn) + if err != nil { + return + } + } + if cfg.Account == "" && strings.HasSuffix(cfg.Host, defaultDomain) { + posDot := strings.Index(cfg.Host, ".") + if posDot > 0 { + cfg.Account = cfg.Host[:posDot] + } + } + posDot := strings.Index(cfg.Account, ".") + if posDot >= 0 { + cfg.Account = cfg.Account[:posDot] + } + + err = fillMissingConfigParameters(cfg) + if err != nil { + return nil, err + } + + // unescape parameters + var s string + s, err = url.QueryUnescape(cfg.User) + if err != nil { + return nil, err + } + cfg.User = s + s, err = url.QueryUnescape(cfg.Password) + if err != nil { + return nil, err + } + cfg.Password = s + s, err = url.QueryUnescape(cfg.Database) + if err != nil { + return nil, err + } + cfg.Database = s + s, err = url.QueryUnescape(cfg.Schema) + if err != nil { + return nil, err + } + cfg.Schema = s + s, err = url.QueryUnescape(cfg.Role) + if err != nil { + return nil, err + } + cfg.Role = s + s, err = url.QueryUnescape(cfg.Warehouse) + if err != nil { + return nil, err + } + cfg.Warehouse = s + return cfg, nil +} + +func fillMissingConfigParameters(cfg *Config) error { + posDash := strings.LastIndex(cfg.Account, "-") + if posDash > 0 { + if strings.Contains(cfg.Host, ".global.") { + cfg.Account = cfg.Account[:posDash] + } + } + if strings.Trim(cfg.Account, " ") == "" { + return ErrEmptyAccount + } + + if cfg.Authenticator != AuthTypeOAuth && strings.Trim(cfg.User, " ") == "" { + // oauth does not require a username + return ErrEmptyUsername + } + + if cfg.Authenticator != 
AuthTypeExternalBrowser && + cfg.Authenticator != AuthTypeOAuth && + cfg.Authenticator != AuthTypeJwt && + strings.Trim(cfg.Password, " ") == "" { + // no password parameter is required for EXTERNALBROWSER, OAUTH or JWT. + return ErrEmptyPassword + } + if strings.Trim(cfg.Protocol, " ") == "" { + cfg.Protocol = "https" + } + if cfg.Port == 0 { + cfg.Port = 443 + } + + cfg.Region = strings.Trim(cfg.Region, " ") + if cfg.Region != "" { + // region is specified but not included in Host + i := strings.Index(cfg.Host, defaultDomain) + if i >= 1 { + hostPrefix := cfg.Host[0:i] + if !strings.HasSuffix(hostPrefix, cfg.Region) { + cfg.Host = hostPrefix + "." + cfg.Region + defaultDomain + } + } + } + if cfg.Host == "" { + if cfg.Region != "" { + cfg.Host = cfg.Account + "." + cfg.Region + defaultDomain + } else { + cfg.Host = cfg.Account + defaultDomain + } + } + if cfg.LoginTimeout == 0 { + cfg.LoginTimeout = defaultLoginTimeout + } + if cfg.RequestTimeout == 0 { + cfg.RequestTimeout = defaultRequestTimeout + } + if cfg.JWTExpireTimeout == 0 { + cfg.JWTExpireTimeout = defaultJWTTimeout + } + if strings.Trim(cfg.Application, " ") == "" { + cfg.Application = clientType + } + + if cfg.OCSPFailOpen == ocspFailOpenNotSet { + cfg.OCSPFailOpen = OCSPFailOpenTrue + } + + if cfg.ValidateDefaultParameters == configBoolNotSet { + cfg.ValidateDefaultParameters = ConfigBoolTrue + } + + if strings.HasSuffix(cfg.Host, defaultDomain) && len(cfg.Host) == len(defaultDomain) { + return &SnowflakeError{ + Number: ErrCodeFailedToParseHost, + Message: errMsgFailedToParseHost, + MessageArgs: []interface{}{cfg.Host}, + } + } + return nil +} + +// transformAccountToHost transforms host to accout name +func transformAccountToHost(cfg *Config) (err error) { + if cfg.Port == 0 && !strings.HasSuffix(cfg.Host, defaultDomain) && cfg.Host != "" { + // account name is specified instead of host:port + cfg.Account = cfg.Host + cfg.Host = cfg.Account + defaultDomain + cfg.Port = 443 + posDot := strings.Index(cfg.Account, ".") + if posDot > 0 { + cfg.Region = cfg.Account[posDot+1:] + cfg.Account = cfg.Account[:posDot] + } + } + return nil +} + +// parseAccountHostPort parses the DSN string to attempt to get account or host and port. +func parseAccountHostPort(cfg *Config, posAt, posSlash int, dsn string) (err error) { + // account or host:port + var k int + for k = posAt + 1; k < posSlash; k++ { + if dsn[k] == ':' { + cfg.Port, err = strconv.Atoi(dsn[k+1 : posSlash]) + if err != nil { + err = &SnowflakeError{ + Number: ErrCodeFailedToParsePort, + Message: errMsgFailedToParsePort, + MessageArgs: []interface{}{dsn[k+1 : posSlash]}, + } + return + } + break + } + } + cfg.Host = dsn[posAt+1 : k] + return transformAccountToHost(cfg) +} + +// parseUserPassword parses the DSN string for username and password +func parseUserPassword(posAt int, dsn string) (user, password string) { + var k int + for k = 0; k < posAt; k++ { + if dsn[k] == ':' { + password = dsn[k+1 : posAt] + break + } + } + user = dsn[:k] + return +} + +// parseParams parse parameters +func parseParams(cfg *Config, posQuestion int, dsn string) (err error) { + for j := posQuestion + 1; j < len(dsn); j++ { + if dsn[j] == '?' { + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + return +} + +// parseDSNParams parses the DSN "query string". 
Values must be url.QueryEscape'ed +func parseDSNParams(cfg *Config, params string) (err error) { + glog.V(2).Infof("Query String: %v\n", params) + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + var value string + value, err = url.QueryUnescape(param[1]) + if err != nil { + return err + } + switch param[0] { + // Disable INFILE whitelist / enable all files + case "account": + cfg.Account = value + case "warehouse": + cfg.Warehouse = value + case "database": + cfg.Database = value + case "schema": + cfg.Schema = value + case "role": + cfg.Role = value + case "region": + cfg.Region = value + case "protocol": + cfg.Protocol = value + case "passcode": + cfg.Passcode = value + case "passcodeInPassword": + var vv bool + vv, err = strconv.ParseBool(value) + if err != nil { + return + } + cfg.PasscodeInPassword = vv + case "loginTimeout": + cfg.LoginTimeout, err = parseTimeout(value) + if err != nil { + return + } + case "requestTimeout": + cfg.RequestTimeout, err = parseTimeout(value) + if err != nil { + return + } + case "jwtTimeout": + cfg.JWTExpireTimeout, err = parseTimeout(value) + if err != nil { + return err + } + case "application": + cfg.Application = value + case "authenticator": + err := determineAuthenticatorType(cfg, value) + if err != nil { + return err + } + case "insecureMode": + var vv bool + vv, err = strconv.ParseBool(value) + if err != nil { + return + } + cfg.InsecureMode = vv + case "ocspFailOpen": + var vv bool + vv, err = strconv.ParseBool(value) + if err != nil { + return + } + if vv { + cfg.OCSPFailOpen = OCSPFailOpenTrue + } else { + cfg.OCSPFailOpen = OCSPFailOpenFalse + } + + case "token": + cfg.Token = value + case "privateKey": + var decodeErr error + block, decodeErr := base64.URLEncoding.DecodeString(value) + if decodeErr != nil { + err = &SnowflakeError{ + Number: ErrCodePrivateKeyParseError, + Message: "Base64 decode failed", + } + return + } + cfg.PrivateKey, err = parsePKCS8PrivateKey(block) + if err != nil { + return err + } + case "validateDefaultParameters": + var vv bool + vv, err = strconv.ParseBool(value) + if err != nil { + return + } + if vv { + cfg.ValidateDefaultParameters = ConfigBoolTrue + } else { + cfg.ValidateDefaultParameters = ConfigBoolFalse + } + default: + if cfg.Params == nil { + cfg.Params = make(map[string]*string) + } + cfg.Params[param[0]] = &value + } + } + return +} + +func parseTimeout(value string) (time.Duration, error) { + var vv int64 + var err error + vv, err = strconv.ParseInt(value, 10, 64) + if err != nil { + return time.Duration(0), err + } + return time.Duration(vv * int64(time.Second)), nil +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/errors.go b/vendor/github.com/snowflakedb/gosnowflake/errors.go new file mode 100644 index 000000000000..1c08710cae23 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/errors.go @@ -0,0 +1,181 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "fmt" +) + +// SnowflakeError is a error type including various Snowflake specific information. +type SnowflakeError struct { + Number int + SQLState string + QueryID string + Message string + MessageArgs []interface{} + IncludeQueryID bool // TODO: populate this in connection +} + +func (se *SnowflakeError) Error() string { + message := se.Message + if len(se.MessageArgs) > 0 { + message = fmt.Sprintf(se.Message, se.MessageArgs...) 
+	}
+	if se.SQLState != "" {
+		if se.IncludeQueryID {
+			return fmt.Sprintf("%06d (%s): %s: %s", se.Number, se.SQLState, se.QueryID, message)
+		}
+		return fmt.Sprintf("%06d (%s): %s", se.Number, se.SQLState, message)
+	}
+	if se.IncludeQueryID {
+		return fmt.Sprintf("%06d: %s: %s", se.Number, se.QueryID, message)
+	}
+	return fmt.Sprintf("%06d: %s", se.Number, message)
+}
+
+const (
+	/* connection */
+
+	// ErrCodeEmptyAccountCode is an error code for the case where a DSN doesn't include the account parameter
+	ErrCodeEmptyAccountCode = 260000
+	// ErrCodeEmptyUsernameCode is an error code for the case where a DSN doesn't include the user parameter
+	ErrCodeEmptyUsernameCode = 260001
+	// ErrCodeEmptyPasswordCode is an error code for the case where a DSN doesn't include the password parameter
+	ErrCodeEmptyPasswordCode = 260002
+	// ErrCodeFailedToParseHost is an error code for the case where a DSN includes an invalid host name
+	ErrCodeFailedToParseHost = 260003
+	// ErrCodeFailedToParsePort is an error code for the case where a DSN includes an invalid port number
+	ErrCodeFailedToParsePort = 260004
+	// ErrCodeIdpConnectionError is an error code for the case where an IDP connection failed
+	ErrCodeIdpConnectionError = 260005
+	// ErrCodeSSOURLNotMatch is an error code for the case where an SSO URL doesn't match
+	ErrCodeSSOURLNotMatch = 260006
+	// ErrCodeServiceUnavailable is an error code for the case where service is unavailable.
+	ErrCodeServiceUnavailable = 260007
+	// ErrCodeFailedToConnect is an error code for the case where a DB connection failed due to wrong account name
+	ErrCodeFailedToConnect = 260008
+	// ErrCodeRegionOverlap is an error code for the case where a region is specified despite an account region present
+	ErrCodeRegionOverlap = 260009
+	// ErrCodePrivateKeyParseError is an error code for the case where the private key is not parsed correctly
+	ErrCodePrivateKeyParseError = 260010
+	// ErrCodeFailedToParseAuthenticator is an error code for the case where a DSN includes an invalid authenticator
+	ErrCodeFailedToParseAuthenticator = 260011
+
+	/* network */
+
+	// ErrFailedToPostQuery is an error code for the case where HTTP POST failed.
+	ErrFailedToPostQuery = 261000
+	// ErrFailedToRenewSession is an error code for the case where session renewal failed.
+	ErrFailedToRenewSession = 261001
+	// ErrFailedToCancelQuery is an error code for the case where cancel query failed.
+	ErrFailedToCancelQuery = 261002
+	// ErrFailedToCloseSession is an error code for the case where close session failed.
+	ErrFailedToCloseSession = 261003
+	// ErrFailedToAuth is an error code for the case where authentication failed for unknown reason.
+	ErrFailedToAuth = 261004
+	// ErrFailedToAuthSAML is an error code for the case where authentication via SAML failed for unknown reason.
+	ErrFailedToAuthSAML = 261005
+	// ErrFailedToAuthOKTA is an error code for the case where authentication via OKTA failed for unknown reason.
+	ErrFailedToAuthOKTA = 261006
+	// ErrFailedToGetSSO is an error code for the case where authentication via OKTA failed for unknown reason.
+	ErrFailedToGetSSO = 261007
+	// ErrFailedToParseResponse is an error code for when we cannot parse an external browser response from Snowflake.
+	ErrFailedToParseResponse = 261008
+	// ErrFailedToGetExternalBrowserResponse is an error code for when there's an error reading from the open socket.
+	ErrFailedToGetExternalBrowserResponse = 261009
+	// ErrFailedToHeartbeat is an error code when a heartbeat fails.
+ ErrFailedToHeartbeat = 261010 + + /* rows */ + + // ErrFailedToGetChunk is an error code for the case where it failed to get chunk of result set + ErrFailedToGetChunk = 262000 + + /* transaction*/ + + // ErrNoReadOnlyTransaction is an error code for the case where readonly mode is specified. + ErrNoReadOnlyTransaction = 263000 + // ErrNoDefaultTransactionIsolationLevel is an error code for the case where non default isolation level is specified. + ErrNoDefaultTransactionIsolationLevel = 263001 + + /* converter */ + + // ErrInvalidTimestampTz is an error code for the case where a returned TIMESTAMP_TZ internal value is invalid + ErrInvalidTimestampTz = 268000 + // ErrInvalidOffsetStr is an error code for the case where a offset string is invalid. The input string must + // consist of sHHMI where one sign character '+'/'-' followed by zero filled hours and minutes + ErrInvalidOffsetStr = 268001 + // ErrInvalidBinaryHexForm is an error code for the case where a binary data in hex form is invalid. + ErrInvalidBinaryHexForm = 268002 + + /* OCSP */ + + // ErrOCSPStatusRevoked is an error code for the case where the certificate is revoked. + ErrOCSPStatusRevoked = 269001 + // ErrOCSPStatusUnknown is an error code for the case where the certificate revocation status is unknown. + ErrOCSPStatusUnknown = 269002 + // ErrOCSPInvalidValidity is an error code for the case where the OCSP response validity is invalid. + ErrOCSPInvalidValidity = 269003 + // ErrOCSPNoOCSPResponderURL is an error code for the case where the OCSP responder URL is not attached. + ErrOCSPNoOCSPResponderURL = 269004 + + /* GS error code */ + + // ErrSessionGone is an GS error code for the case that session is already closed + ErrSessionGone = 390111 + // ErrRoleNotExist is a GS error code for the case that the role specified does not exist + ErrRoleNotExist = 390189 + // ErrObjectNotExistOrAuthorized is a GS error code for the case that the server-side object specified does not exist + ErrObjectNotExistOrAuthorized = 390201 +) + +const ( + errMsgFailedToParseHost = "failed to parse a host name. host: %v" + errMsgFailedToParsePort = "failed to parse a port number. port: %v" + errMsgFailedToParseAuthenticator = "failed to parse an authenticator: %v" + errMsgInvalidOffsetStr = "offset must be a string consist of sHHMI where one sign character '+'/'-' followed by zero filled hours and minutes: %v" + errMsgInvalidByteArray = "invalid byte array: %v" + errMsgIdpConnectionError = "failed to verify URLs. authenticator: %v, token URL:%v, SSO URL:%v" + errMsgSSOURLNotMatch = "SSO URL didn't match. expected: %v, got: %v" + errMsgFailedToGetChunk = "failed to get a chunk of result sets. idx: %v" + errMsgFailedToPostQuery = "failed to POST. HTTP: %v, URL: %v" + errMsgFailedToRenew = "failed to renew session. HTTP: %v, URL: %v" + errMsgFailedToCancelQuery = "failed to cancel query. HTTP: %v, URL: %v" + errMsgFailedToCloseSession = "failed to close session. HTTP: %v, URL: %v" + errMsgFailedToAuth = "failed to auth for unknown reason. HTTP: %v, URL: %v" + errMsgFailedToAuthSAML = "failed to auth via SAML for unknown reason. HTTP: %v, URL: %v" + errMsgFailedToAuthOKTA = "failed to auth via OKTA for unknown reason. HTTP: %v, URL: %v" + errMsgFailedToGetSSO = "failed to auth via OKTA for unknown reason. HTTP: %v, URL: %v" + errMsgFailedToParseResponse = "failed to parse a response from Snowflake. 
Response: %v" + errMsgFailedToGetExternalBrowserResponse = "failed to get an external browser response from Snowflake, err: %s" + errMsgNoReadOnlyTransaction = "no readonly mode is supported" + errMsgNoDefaultTransactionIsolationLevel = "no default isolation transaction level is supported" + errMsgServiceUnavailable = "service is unavailable. check your connectivity. you may need a proxy server. HTTP: %v, URL: %v" + errMsgFailedToConnect = "failed to connect to db. verify account name is correct. HTTP: %v, URL: %v" + errMsgOCSPStatusRevoked = "OCSP revoked: reason:%v, at:%v" + errMsgOCSPStatusUnknown = "OCSP unknown" + errMsgOCSPInvalidValidity = "invalid validity: producedAt: %v, thisUpdate: %v, nextUpdate: %v" + errMsgOCSPNoOCSPResponderURL = "no OCSP server is attached to the certificate. %v" +) + +var ( + // ErrEmptyAccount is returned if a DNS doesn't include account parameter. + ErrEmptyAccount = &SnowflakeError{ + Number: ErrCodeEmptyAccountCode, + Message: "account is empty", + } + // ErrEmptyUsername is returned if a DNS doesn't include user parameter. + ErrEmptyUsername = &SnowflakeError{ + Number: ErrCodeEmptyUsernameCode, + Message: "user is empty", + } + // ErrEmptyPassword is returned if a DNS doesn't include password parameter. + ErrEmptyPassword = &SnowflakeError{ + Number: ErrCodeEmptyPasswordCode, + Message: "password is empty"} + + // ErrInvalidRegion is returned if a DSN's implicit region from account parameter and explicit region parameter conflict. + ErrInvalidRegion = &SnowflakeError{ + Number: ErrCodeRegionOverlap, + Message: "two regions specified"} +) diff --git a/vendor/github.com/snowflakedb/gosnowflake/go.mod b/vendor/github.com/snowflakedb/gosnowflake/go.mod new file mode 100644 index 000000000000..0d0f04645641 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/go.mod @@ -0,0 +1,12 @@ +module github.com/snowflakedb/gosnowflake + +go 1.15 + +require ( + github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 + github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 + github.com/google/uuid v1.1.1 + github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 + github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce + golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 +) diff --git a/vendor/github.com/snowflakedb/gosnowflake/go.sum b/vendor/github.com/snowflakedb/gosnowflake/go.sum new file mode 100644 index 000000000000..9cd646953318 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/go.sum @@ -0,0 +1,27 @@ +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= +github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 
h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce h1:CGR1hXCOeoZ1aJhCs8qdKJuEu3xoZnxsLcYoh5Bnr+4= +github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce/go.mod h1:EB/w24pR5VKI60ecFnKqXzxX3dOorz1rnVicQTQrGM0= +github.com/stretchr/testify v1.2.0 h1:LThGCOvhuJic9Gyd1VBCkhyUXmO8vKaBFvBsJ2k03rg= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/snowflakedb/gosnowflake/gosnowflake.mak b/vendor/github.com/snowflakedb/gosnowflake/gosnowflake.mak new file mode 100644 index 000000000000..102934449f71 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/gosnowflake.mak @@ -0,0 +1,42 @@ +## Setup +SHELL := /bin/bash +SRC = $(shell find . -type f -name '*.go' -not -path "./vendor/*") + +setup: + @which golint &> /dev/null || go get -u golang.org/x/lint/golint + @which make2help &> /dev/null || go get github.com/Songmu/make2help/cmd/make2help + @which staticcheck &> /dev/null || go get honnef.co/go/tools/cmd/staticcheck + +## Install dependencies +deps: setup + go mod tidy + go mod vendor + +## Show help +help: + @make2help $(MAKEFILE_LIST) + +# Format source codes (internally used) +cfmt: setup + @gofmt -l -w $(SRC) + +# Lint (internally used) +clint: deps + @echo "Running staticcheck" && staticcheck + @echo "Running go vet and lint" + @for pkg in $$(go list ./... | grep -v /vendor/); do \ + echo "Verifying $$pkg"; \ + go vet $$pkg; \ + golint -set_exit_status $$pkg || exit $$?; \ + done + +# Install (internally used) +cinstall: + @export GOBIN=$$GOPATH/bin; \ + go install -tags=sfdebug $(CMD_TARGET).go + +# Run (internally used) +crun: install + $(CMD_TARGET) + +.PHONY: setup help cfmt clint cinstall crun diff --git a/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go b/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go new file mode 100644 index 000000000000..b019a101170d --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go @@ -0,0 +1,104 @@ +// Copyright (c) 2019 Snowflake Computing Inc. All right reserved. 
+ +package gosnowflake + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/google/uuid" +) + +const ( + // One hour interval should be good enough to renew tokens for four hours master token validity + heartBeatInterval = 3600 * time.Second +) + +type heartbeat struct { + restful *snowflakeRestful + shutdownChan chan bool +} + +func (hc *heartbeat) run() { + hbTicker := time.NewTicker(heartBeatInterval) + defer hbTicker.Stop() + for { + select { + case <-hbTicker.C: + err := hc.heartbeatMain() + if err != nil { + glog.V(2).Info("failed to heartbeat") + } + case <-hc.shutdownChan: + glog.V(2).Info("stopping heartbeat") + return + } + } +} + +func (hc *heartbeat) start() { + hc.shutdownChan = make(chan bool) + go hc.run() + glog.V(2).Info("heartbeat started") +} + +func (hc *heartbeat) stop() { + hc.shutdownChan <- true + close(hc.shutdownChan) + glog.V(2).Info("heartbeat stopped") +} + +func (hc *heartbeat) heartbeatMain() error { + glog.V(2).Info("Heartbeating!") + params := &url.Values{} + params.Add(requestIDKey, uuid.New().String()) + params.Add(requestGUIDKey, uuid.New().String()) + headers := make(map[string]string) + headers["Content-Type"] = headerContentTypeApplicationJSON + headers["accept"] = headerAcceptTypeApplicationSnowflake + headers["User-Agent"] = userAgent + headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, hc.restful.Token) + + fullURL := hc.restful.getFullURL(heartBeatPath, params) + timeout := hc.restful.RequestTimeout + resp, err := hc.restful.FuncPost(context.Background(), hc.restful, fullURL, headers, nil, timeout, false) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { + glog.V(2).Infof("heartbeatMain: resp: %v", resp) + var respd execResponse + err = json.NewDecoder(resp.Body).Decode(&respd) + if err != nil { + glog.V(1).Infof("failed to decode JSON. err: %v", err) + glog.Flush() + return err + } + if respd.Code == sessionExpiredCode { + err = hc.restful.FuncRenewSession(context.TODO(), hc.restful, timeout) + if err != nil { + return err + } + } + return nil + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) + return err + } + glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) + glog.V(1).Infof("Header: %v", resp.Header) + glog.Flush() + return &SnowflakeError{ + Number: ErrFailedToHeartbeat, + SQLState: SQLStateConnectionFailure, + Message: "Failed to heartbeat.", + } +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/location.go b/vendor/github.com/snowflakedb/gosnowflake/location.go new file mode 100644 index 000000000000..7a6ed546c30d --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/location.go @@ -0,0 +1,89 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "fmt" + "strconv" + "sync" + "time" +) + +var timezones map[int]*time.Location +var updateTimezoneMutex *sync.Mutex + +// Location returns an offset (minutes) based Location object for Snowflake database. +func Location(offset int) *time.Location { + updateTimezoneMutex.Lock() + defer updateTimezoneMutex.Unlock() + loc := timezones[offset] + if loc != nil { + return loc + } + loc = genTimezone(offset) + timezones[offset] = loc + return loc +} + +// LocationWithOffsetString returns an offset based Location object. 
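The heartbeat above is driven entirely by start() and stop(); a connection would wire it roughly as in this sketch. startKeepAlive and stopKeepAlive are hypothetical names, and restful is assumed to be an already-authenticated snowflakeRestful.

```go
package gosnowflake

// Sketch only: hypothetical helpers showing the heartbeat lifecycle.
// run() ticks every heartBeatInterval (one hour), well inside the
// roughly four-hour master token validity mentioned above.
func startKeepAlive(restful *snowflakeRestful) *heartbeat {
	hb := &heartbeat{restful: restful}
	hb.start() // spawns run() in its own goroutine
	return hb
}

func stopKeepAlive(hb *heartbeat) {
	hb.stop() // signals shutdownChan; run() returns
}
```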
The offset string must consist of sHHMI where one sign +// character '+'/'-' followed by zero filled hours and minutes. +func LocationWithOffsetString(offsets string) (loc *time.Location, err error) { + if len(offsets) != 5 { + return nil, &SnowflakeError{ + Number: ErrInvalidOffsetStr, + SQLState: SQLStateInvalidDataTimeFormat, + Message: errMsgInvalidOffsetStr, + MessageArgs: []interface{}{offsets}, + } + } + if offsets[0] != '-' && offsets[0] != '+' { + return nil, &SnowflakeError{ + Number: ErrInvalidOffsetStr, + SQLState: SQLStateInvalidDataTimeFormat, + Message: errMsgInvalidOffsetStr, + MessageArgs: []interface{}{offsets}, + } + } + s := 1 + if offsets[0] == '-' { + s = -1 + } + var h, m int64 + h, err = strconv.ParseInt(offsets[1:3], 10, 64) + if err != nil { + return + } + m, err = strconv.ParseInt(offsets[3:], 10, 64) + if err != nil { + return + } + offset := s * (int(h)*60 + int(m)) + loc = Location(offset) + return +} + +func genTimezone(offset int) *time.Location { + var offsetSign string + var toffset int + if offset < 0 { + offsetSign = "-" + toffset = -offset + } else { + offsetSign = "+" + toffset = offset + } + glog.V(2).Infof("offset: %v", offset) + return time.FixedZone( + fmt.Sprintf("%v%02d%02d", + offsetSign, toffset/60, toffset%60), int(offset)*60) +} + +func init() { + updateTimezoneMutex = &sync.Mutex{} + timezones = make(map[int]*time.Location, 48) + // pre-generate all common timezones + for i := -720; i <= 720; i += 30 { + glog.V(2).Infof("offset: %v", i) + timezones[i] = genTimezone(i) + } +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/log.go b/vendor/github.com/snowflakedb/gosnowflake/log.go new file mode 100644 index 000000000000..5236a521ce7c --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/log.go @@ -0,0 +1,40 @@ +// +build !sfdebug + +// Wrapper for glog to replace direct use, so that glog usage remains optional. +// This file contains the no-op glog wrapper/emulator. + +package gosnowflake + +// glogWrapper is an empty struct to create a no-op glog wrapper +type glogWrapper struct{} + +// V emulates the glog.V() call +func (glogWrapper) V(int) glogWrapper { + return glogWrapper{} +} + +// Check if the logging is enabled. Returns always False by default +func (glogWrapper) IsEnabled(int) bool { + return false +} + +// Flush emulates the glog.Flush() call +func (glogWrapper) Flush() {} + +// Info emulates the glog.V(?).Info call +func (glogWrapper) Info(...interface{}) {} + +// Infoln emulates the glog.V(?).Infoln call +func (glogWrapper) Infoln(...interface{}) {} + +// Infof emulates the glog.V(?).Infof call +func (glogWrapper) Infof(...interface{}) {} + +// InfoDepth emulates the glog.V(?).InfoDepth call +func (glogWrapper) InfoDepth(...interface{}) {} + +// NOTE: Warning* and Error* methods are not emulated since they are not used. +// NOTE: Fatal* and Exit* methods are not emulated, since they also require additional calls (like os.Exit() and stack traces) to be compatible. + +// glog is our glog emulator +var glog = glogWrapper{} diff --git a/vendor/github.com/snowflakedb/gosnowflake/log_debug.go b/vendor/github.com/snowflakedb/gosnowflake/log_debug.go new file mode 100644 index 000000000000..11cfaa250e03 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/log_debug.go @@ -0,0 +1,30 @@ +// +build sfdebug + +// Wrapper for glog to replace direct use, so that glog usage remains optional. +// This file contains the actual/operational glog wrapper. 
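A worked example of the sHHMI offset format parsed above (the offset strings are hypothetical):

```go
package gosnowflake

import "fmt"

// Sketch only: "-0700" parses as sign '-', hours 07, minutes 00,
// i.e. -420 minutes, which Location() resolves to a cached fixed zone.
func exampleLocationWithOffsetString() {
	loc, err := LocationWithOffsetString("-0700")
	if err != nil {
		panic(err)
	}
	fmt.Println(loc) // -0700
	// Strings such as "-700" (wrong length) or "07:00" (no sign) fail
	// with an ErrInvalidOffsetStr-numbered SnowflakeError.
}
```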
+
+package gosnowflake
+
+import logger "github.com/snowflakedb/glog"
+
+// glogWrapper wraps glog's Verbose type, enabling the use of glog.V().* methods directly
+type glogWrapper struct {
+	logger.Verbose
+}
+
+// V provides a wrapper for the glog.V() call
+func (l *glogWrapper) V(level int32) glogWrapper {
+	return glogWrapper{logger.V(logger.Level(level))}
+}
+
+// IsEnabled reports whether logging is enabled at the given verbosity level
+func (l *glogWrapper) IsEnabled(level int32) bool {
+	return bool(logger.V(logger.Level(level)))
+}
+
+// Flush calls Flush on the underlying logger
+func (l *glogWrapper) Flush() {
+	logger.Flush()
+}
+
+// glog is our glog wrapper
+var glog = glogWrapper{}
diff --git a/vendor/github.com/snowflakedb/gosnowflake/ocsp.go b/vendor/github.com/snowflakedb/gosnowflake/ocsp.go
new file mode 100644
index 000000000000..e90ec9967769
--- /dev/null
+++ b/vendor/github.com/snowflakedb/gosnowflake/ocsp.go
@@ -0,0 +1,958 @@
+// Copyright (c) 2017-2019 Snowflake Computing Inc. All rights reserved.
+
+package gosnowflake
+
+import (
+	"bufio"
+	"context"
+	"crypto"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/crypto/ocsp"
+)
+
+// caRoot includes the CA certificates.
+var caRoot map[string]*x509.Certificate
+
+// certPool includes the CA certificates.
+var certPool *x509.CertPool
+
+// cacheDir is the location of the OCSP response cache file
+var cacheDir = ""
+
+// cacheFileName is the file name of the OCSP response cache file
+var cacheFileName = ""
+
+// cacheUpdated is true if the in-memory cache has been updated
+var cacheUpdated = true
+
+// OCSPFailOpenMode is the OCSP fail-open mode. It is OCSPFailOpenTrue by default and may be set to OCSPFailOpenFalse for fail-closed mode
+type OCSPFailOpenMode uint8
+
+const (
+	ocspFailOpenNotSet OCSPFailOpenMode = iota
+	// OCSPFailOpenTrue represents OCSP fail open mode.
+	OCSPFailOpenTrue
+	// OCSPFailOpenFalse represents OCSP fail closed mode.
+	OCSPFailOpenFalse
+)
+const (
+	ocspModeFailOpen   = "FAIL_OPEN"
+	ocspModeFailClosed = "FAIL_CLOSED"
+	ocspModeInsecure   = "INSECURE"
+)
+
+// OCSP fail open mode
+var ocspFailOpen = OCSPFailOpenTrue
+
+const (
+	// defaultOCSPCacheServerTimeout is the total timeout for the OCSP cache server.
+	defaultOCSPCacheServerTimeout = 5 * time.Second
+
+	// defaultOCSPResponderTimeout is the total timeout for the OCSP responder.
+	defaultOCSPResponderTimeout = 10 * time.Second
+)
+
+const (
+	cacheFileBaseName = "ocsp_response_cache.json"
+	// cacheExpire specifies cache data expiration time in seconds.
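The two glog wrappers above (the no-op default and the -tags sfdebug build) expose the same chainable surface, so driver code can log unconditionally; under the default build tag the calls compile down to no-ops. A minimal sketch:

```go
package gosnowflake

// Sketch only: the same call sites work against either wrapper.
// Verbose output appears only when built with `go build -tags sfdebug`.
func exampleLogging() {
	glog.V(2).Infof("connecting to %v", "testaccount") // hypothetical message
	glog.Flush()
}
```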
+ cacheExpire = float64(24 * 60 * 60) + cacheServerURL = "http://ocsp.snowflakecomputing.com" + cacheServerEnabledEnv = "SF_OCSP_RESPONSE_CACHE_SERVER_ENABLED" + cacheServerURLEnv = "SF_OCSP_RESPONSE_CACHE_SERVER_URL" + cacheDirEnv = "SF_OCSP_RESPONSE_CACHE_DIR" +) + +const ( + ocspTestInjectValidityErrorEnv = "SF_OCSP_TEST_INJECT_VALIDITY_ERROR" + ocspTestInjectUnknownStatusEnv = "SF_OCSP_TEST_INJECT_UNKNOWN_STATUS" + ocspTestResponseCacheServerTimeoutEnv = "SF_OCSP_TEST_OCSP_RESPONSE_CACHE_SERVER_TIMEOUT" + ocspTestResponderTimeoutEnv = "SF_OCSP_TEST_OCSP_RESPONDER_TIMEOUT" + ocspTestResponderURLEnv = "SF_OCSP_RESPONDER_URL" + ocspTestNoOCSPURLEnv = "SF_OCSP_TEST_NO_OCSP_RESPONDER_URL" +) + +const ( + tolerableValidityRatio = 100 // buffer for certificate revocation update time + maxClockSkew = 900 * time.Second // buffer for clock skew +) + +type ocspStatusCode int + +type ocspStatus struct { + code ocspStatusCode + err error +} + +const ( + ocspSuccess ocspStatusCode = 0 + ocspStatusGood ocspStatusCode = -1 + ocspStatusRevoked ocspStatusCode = -2 + ocspStatusUnknown ocspStatusCode = -3 + ocspStatusOthers ocspStatusCode = -4 + ocspNoServer ocspStatusCode = -5 + ocspFailedParseOCSPHost ocspStatusCode = -6 + ocspFailedComposeRequest ocspStatusCode = -7 + ocspFailedDecomposeRequest ocspStatusCode = -8 + ocspFailedSubmit ocspStatusCode = -9 + ocspFailedResponse ocspStatusCode = -10 + ocspFailedExtractResponse ocspStatusCode = -11 + ocspFailedParseResponse ocspStatusCode = -12 + ocspInvalidValidity ocspStatusCode = -13 + ocspMissedCache ocspStatusCode = -14 + ocspCacheExpired ocspStatusCode = -15 + ocspFailedDecodeResponse ocspStatusCode = -16 +) + +// copied from crypto/ocsp.go +type certID struct { + HashAlgorithm pkix.AlgorithmIdentifier + NameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// cache key +type certIDKey struct { + HashAlgorithm crypto.Hash + NameHash string + IssuerKeyHash string + SerialNumber string +} + +var ( + ocspResponseCache map[certIDKey][]interface{} + ocspResponseCacheLock *sync.RWMutex +) + +// copied from crypto/ocsp +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// copied from crypto/ocsp +func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { + for hash, oid := range hashOIDs { + if hash == target { + return oid + } + } + glog.V(0).Infof("no valid OID is found for the hash algorithm. %#v", target) + return nil +} + +func getHashAlgorithmFromOID(target pkix.AlgorithmIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target.Algorithm) { + return hash + } + } + glog.V(0).Infof("no valid hash algorithm is found for the oid. 
Falling back to SHA1: %#v", target) + return crypto.SHA1 +} + +// calcTolerableValidity returns the maximum validity buffer +func calcTolerableValidity(thisUpdate, nextUpdate time.Time) time.Duration { + return durationMax(time.Duration(nextUpdate.Sub(thisUpdate)/tolerableValidityRatio), maxClockSkew) +} + +// isInValidityRange checks the validity +func isInValidityRange(currTime, thisUpdate, nextUpdate time.Time) bool { + if currTime.Sub(thisUpdate.Add(-maxClockSkew)) < 0 { + return false + } + if nextUpdate.Add(calcTolerableValidity(thisUpdate, nextUpdate)).Sub(currTime) < 0 { + return false + } + return true +} + +func isTestInvalidValidity() bool { + return strings.EqualFold(os.Getenv(ocspTestInjectValidityErrorEnv), "true") +} + +func extractCertIDKeyFromRequest(ocspReq []byte) (*certIDKey, *ocspStatus) { + r, err := ocsp.ParseRequest(ocspReq) + if err != nil { + return nil, &ocspStatus{ + code: ocspFailedDecomposeRequest, + err: err, + } + } + + // encode CertID, used as a key in the cache + encodedCertID := &certIDKey{ + r.HashAlgorithm, + base64.StdEncoding.EncodeToString(r.IssuerNameHash), + base64.StdEncoding.EncodeToString(r.IssuerKeyHash), + r.SerialNumber.String(), + } + return encodedCertID, &ocspStatus{ + code: ocspSuccess, + } +} + +func encodeCertIDKey(certIDKeyBase64 string) *certIDKey { + r, err := base64.StdEncoding.DecodeString(certIDKeyBase64) + if err != nil { + return nil + } + var c certID + rest, err := asn1.Unmarshal(r, &c) + if err != nil { + // error in parsing + return nil + } + if len(rest) > 0 { + // extra bytes to the end + return nil + } + return &certIDKey{ + getHashAlgorithmFromOID(c.HashAlgorithm), + base64.StdEncoding.EncodeToString(c.NameHash), + base64.StdEncoding.EncodeToString(c.IssuerKeyHash), + c.SerialNumber.String(), + } +} + +func decodeCertIDKey(k *certIDKey) string { + serialNumber := new(big.Int) + serialNumber.SetString(k.SerialNumber, 10) + nameHash, err := base64.StdEncoding.DecodeString(k.NameHash) + if err != nil { + return "" + } + issuerKeyHash, err := base64.StdEncoding.DecodeString(k.IssuerKeyHash) + if err != nil { + return "" + } + encodedCertID, err := asn1.Marshal(certID{ + pkix.AlgorithmIdentifier{ + Algorithm: getOIDFromHashAlgorithm(k.HashAlgorithm), + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + nameHash, + issuerKeyHash, + serialNumber, + }) + if err != nil { + return "" + } + return base64.StdEncoding.EncodeToString(encodedCertID) +} + +func checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate) *ocspStatus { + ocspResponseCacheLock.RLock() + gotValueFromCache := ocspResponseCache[*encodedCertID] + ocspResponseCacheLock.RUnlock() + + status := extractOCSPCacheResponseValue(gotValueFromCache, subject, issuer) + if !isValidOCSPStatus(status.code) { + deleteOCSPCache(encodedCertID) + } + return status +} + +func deleteOCSPCache(encodedCertID *certIDKey) { + ocspResponseCacheLock.Lock() + delete(ocspResponseCache, *encodedCertID) + cacheUpdated = true + ocspResponseCacheLock.Unlock() +} + +// deleteOCSPCacheAll deletes all entries in the OCSP response cache on memory +func deleteOCSPCacheAll() { + ocspResponseCacheLock.Lock() + defer ocspResponseCacheLock.Unlock() + ocspResponseCache = make(map[certIDKey][]interface{}) +} + +func validateOCSP(ocspRes *ocsp.Response) *ocspStatus { + curTime := time.Now() + + if ocspRes == nil { + return &ocspStatus{ + code: ocspFailedDecomposeRequest, + err: errors.New("OCSP Response is nil"), + } + } + if isTestInvalidValidity() || 
!isInValidityRange(curTime, ocspRes.ThisUpdate, ocspRes.NextUpdate) { + return &ocspStatus{ + code: ocspInvalidValidity, + err: &SnowflakeError{ + Number: ErrOCSPInvalidValidity, + Message: errMsgOCSPInvalidValidity, + MessageArgs: []interface{}{ocspRes.ProducedAt, ocspRes.ThisUpdate, ocspRes.NextUpdate}, + }, + } + } + if isTestUnknownStatus() { + ocspRes.Status = ocsp.Unknown + } + return returnOCSPStatus(ocspRes) +} + +func returnOCSPStatus(ocspRes *ocsp.Response) *ocspStatus { + switch ocspRes.Status { + case ocsp.Good: + return &ocspStatus{ + code: ocspStatusGood, + err: nil, + } + case ocsp.Revoked: + return &ocspStatus{ + code: ocspStatusRevoked, + err: &SnowflakeError{ + Number: ErrOCSPStatusRevoked, + Message: errMsgOCSPStatusRevoked, + MessageArgs: []interface{}{ocspRes.RevocationReason, ocspRes.RevokedAt}, + }, + } + case ocsp.Unknown: + return &ocspStatus{ + code: ocspStatusUnknown, + err: &SnowflakeError{ + Number: ErrOCSPStatusUnknown, + Message: errMsgOCSPStatusUnknown, + }, + } + default: + return &ocspStatus{ + code: ocspStatusOthers, + err: fmt.Errorf("OCSP others. %v", ocspRes.Status), + } + } +} + +func isTestUnknownStatus() bool { + return strings.EqualFold(os.Getenv(ocspTestInjectUnknownStatusEnv), "true") +} + +func checkOCSPCacheServer( + ctx context.Context, + client clientInterface, + req requestFunc, + ocspServerHost *url.URL, + totalTimeout time.Duration) ( + cacheContent *map[string][]interface{}, + ocspS *ocspStatus) { + var respd map[string][]interface{} + headers := make(map[string]string) + res, err := newRetryHTTP(ctx, client, req, ocspServerHost, headers, totalTimeout).execute() + if err != nil { + glog.V(2).Infof("failed to get OCSP cache from OCSP Cache Server. %v\n", err) + return nil, &ocspStatus{ + code: ocspFailedSubmit, + err: err, + } + } + defer res.Body.Close() + glog.V(2).Infof("StatusCode from OCSP Cache Server: %v\n", res.StatusCode) + if res.StatusCode != http.StatusOK { + return nil, &ocspStatus{ + code: ocspFailedResponse, + err: fmt.Errorf("HTTP code is not OK. %v: %v", res.StatusCode, res.Status), + } + } + glog.V(2).Info("reading contents") + + dec := json.NewDecoder(res.Body) + for { + if err := dec.Decode(&respd); err == io.EOF { + break + } else if err != nil { + glog.V(2).Infof("failed to decode OCSP cache. %v\n", err) + return nil, &ocspStatus{ + code: ocspFailedExtractResponse, + err: err, + } + } + } + return &respd, &ocspStatus{ + code: ocspSuccess, + } +} + +// retryOCSP is the second level of retry method if the returned contents are corrupted. It often happens with OCSP +// serer and retry helps. +func retryOCSP( + ctx context.Context, + client clientInterface, + req requestFunc, + ocspHost *url.URL, + headers map[string]string, + reqBody []byte, + issuer *x509.Certificate, + totalTimeout time.Duration) ( + ocspRes *ocsp.Response, + ocspResBytes []byte, + ocspS *ocspStatus) { + multiplier := 1 + if ocspFailOpen == OCSPFailOpenFalse { + multiplier = 3 // up to 3 times for Fail Close mode + } + res, err := newRetryHTTP( + ctx, client, req, ocspHost, headers, + totalTimeout*time.Duration(multiplier)).doPost().setBody(reqBody).execute() + if err != nil { + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspFailedSubmit, + err: err, + } + } + defer res.Body.Close() + glog.V(2).Infof("StatusCode from OCSP Server: %v\n", res.StatusCode) + if res.StatusCode != http.StatusOK { + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspFailedResponse, + err: fmt.Errorf("HTTP code is not OK. 
%v: %v", res.StatusCode, res.Status), + } + } + glog.V(2).Info("reading contents") + ocspResBytes, err = ioutil.ReadAll(res.Body) + if err != nil { + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspFailedExtractResponse, + err: err, + } + } + glog.V(2).Info("parsing OCSP response") + ocspRes, err = ocsp.ParseResponse(ocspResBytes, issuer) + if err != nil { + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspFailedParseResponse, + err: err, + } + } + + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspSuccess, + } +} + +// getRevocationStatus checks the certificate revocation status for subject using issuer certificate. +func getRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate) *ocspStatus { + glog.V(2).Infof("Subject: %v, Issuer: %v\n", subject.Subject, issuer.Subject) + + status, ocspReq, encodedCertID := validateWithCache(subject, issuer) + if isValidOCSPStatus(status.code) { + return status + } + if ocspReq == nil || encodedCertID == nil { + return status + } + glog.V(2).Infof("cache missed\n") + glog.V(2).Infof("OCSP Server: %v\n", subject.OCSPServer) + if len(subject.OCSPServer) == 0 || isTestNoOCSPURL() { + return &ocspStatus{ + code: ocspNoServer, + err: &SnowflakeError{ + Number: ErrOCSPNoOCSPResponderURL, + Message: errMsgOCSPNoOCSPResponderURL, + MessageArgs: []interface{}{subject.Subject}, + }, + } + } + ocspHost := subject.OCSPServer[0] + u, err := url.Parse(ocspHost) + if err != nil { + return &ocspStatus{ + code: ocspFailedParseOCSPHost, + err: fmt.Errorf("failed to parse OCSP server host. %v", ocspHost), + } + } + hostnameStr := os.Getenv(ocspTestResponderURLEnv) + hostname := u.Hostname() + if hostnameStr != "" { + u0, err := url.Parse(hostnameStr) + if err == nil { + hostname = u0.Hostname() + u = u0 + } + } + headers := make(map[string]string) + headers["Content-Type"] = "application/ocsp-request" + headers["Accept"] = "application/ocsp-response" + headers["Content-Length"] = strconv.Itoa(len(ocspReq)) + headers["Host"] = hostname + timeoutStr := os.Getenv(ocspTestResponderTimeoutEnv) + timeout := defaultOCSPResponderTimeout + if timeoutStr != "" { + var timeoutInMilliseconds int + timeoutInMilliseconds, err = strconv.Atoi(timeoutStr) + if err == nil { + timeout = time.Duration(timeoutInMilliseconds) * time.Millisecond + } + } + ocspClient := &http.Client{ + Timeout: timeout, + Transport: snowflakeInsecureTransport, + } + ocspRes, ocspResBytes, ocspS := retryOCSP( + ctx, ocspClient, http.NewRequest, u, headers, ocspReq, issuer, timeout) + if ocspS.code != ocspSuccess { + return ocspS + } + + ret := validateOCSP(ocspRes) + if !isValidOCSPStatus(ret.code) { + return ret // return invalid + } + v := []interface{}{float64(time.Now().UTC().Unix()), base64.StdEncoding.EncodeToString(ocspResBytes)} + ocspResponseCacheLock.Lock() + ocspResponseCache[*encodedCertID] = v + cacheUpdated = true + ocspResponseCacheLock.Unlock() + return ret +} + +func isTestNoOCSPURL() bool { + return strings.EqualFold(os.Getenv(ocspTestNoOCSPURLEnv), "true") +} + +func isValidOCSPStatus(status ocspStatusCode) bool { + return status == ocspStatusGood || status == ocspStatusRevoked || status == ocspStatusUnknown +} + +// verifyPeerCertificate verifies all of certificate revocation status +func verifyPeerCertificate(ctx context.Context, verifiedChains [][]*x509.Certificate) (err error) { + for i := 0; i < len(verifiedChains); i++ { + // Certificate signed by Root CA. 
This should be one before the last in the Certificate Chain + numberOfNoneRootCerts := len(verifiedChains[i]) - 1 + if !verifiedChains[i][numberOfNoneRootCerts].IsCA || string(verifiedChains[i][numberOfNoneRootCerts].RawIssuer) != string(verifiedChains[i][numberOfNoneRootCerts].RawSubject) { + // Check if the last Non Root Cert is also a CA or is self signed. + // if the last certificate is not, add it to the list + rca := caRoot[string(verifiedChains[i][numberOfNoneRootCerts].RawIssuer)] + if rca == nil { + return fmt.Errorf("failed to find root CA. pkix.name: %v", verifiedChains[i][numberOfNoneRootCerts].Issuer) + } + verifiedChains[i] = append(verifiedChains[i], rca) + numberOfNoneRootCerts++ + } + results := getAllRevocationStatus(ctx, verifiedChains[i]) + if r := canEarlyExitForOCSP(results, numberOfNoneRootCerts); r != nil { + return r.err + } + } + + ocspResponseCacheLock.Lock() + if cacheUpdated { + writeOCSPCacheFile() + } + cacheUpdated = false + ocspResponseCacheLock.Unlock() + return nil +} + +func canEarlyExitForOCSP(results []*ocspStatus, chainSize int) *ocspStatus { + msg := "" + if ocspFailOpen == OCSPFailOpenFalse { + // Fail closed. any error is returned to stop connection + for _, r := range results { + if r.err != nil { + return r + } + } + } else { + // Fail open and all results are valid. + allValid := len(results) == chainSize + for _, r := range results { + if !isValidOCSPStatus(r.code) { + allValid = false + break + } + } + for _, r := range results { + if allValid && r.code == ocspStatusRevoked { + return r + } + if r != nil && r.code != ocspStatusGood && r.err != nil { + msg += "\n" + r.err.Error() + } + } + } + if len(msg) > 0 { + glog.V(1).Infof( + "WARNING!!! Using fail-open to connect. Driver is connecting to an "+ + "HTTPS endpoint without OCSP based Certificate Revocation checking "+ + "as it could not obtain a valid OCSP Response to use from the CA OCSP "+ + "responder. 
Detail: %v", msg[1:]) + } + return nil +} + +func validateWithCacheForAllCertificates(verifiedChains []*x509.Certificate) bool { + n := len(verifiedChains) - 1 + for j := 0; j < n; j++ { + subject := verifiedChains[j] + issuer := verifiedChains[j+1] + status, _, _ := validateWithCache(subject, issuer) + if !isValidOCSPStatus(status.code) { + return false + } + } + return true +} + +func validateWithCache(subject, issuer *x509.Certificate) (*ocspStatus, []byte, *certIDKey) { + ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{}) + if err != nil { + glog.V(2).Infof("failed to create OCSP request from the certificates.\n") + return &ocspStatus{ + code: ocspFailedComposeRequest, + err: errors.New("failed to create a OCSP request"), + }, nil, nil + } + encodedCertID, ocspS := extractCertIDKeyFromRequest(ocspReq) + if ocspS.code != ocspSuccess { + glog.V(2).Infof("failed to extract CertID from OCSP Request.\n") + return &ocspStatus{ + code: ocspFailedComposeRequest, + err: errors.New("failed to extract cert ID Key"), + }, ocspReq, nil + } + status := checkOCSPResponseCache(encodedCertID, subject, issuer) + return status, ocspReq, encodedCertID +} + +func downloadOCSPCacheServer() { + if strings.EqualFold(os.Getenv(cacheServerEnabledEnv), "false") { + glog.V(2).Infof("skipping downloading OCSP Cache.") + return + } + ocspCacheServerURL := os.Getenv(cacheServerURLEnv) + if ocspCacheServerURL == "" { + ocspCacheServerURL = fmt.Sprintf("%v/%v", cacheServerURL, cacheFileBaseName) + } + u, err := url.Parse(ocspCacheServerURL) + if err != nil { + return + } + glog.V(2).Infof("downloading OCSP Cache from server %v", ocspCacheServerURL) + timeoutStr := os.Getenv(ocspTestResponseCacheServerTimeoutEnv) + timeout := defaultOCSPCacheServerTimeout + if timeoutStr != "" { + var timeoutInMilliseconds int + timeoutInMilliseconds, err = strconv.Atoi(timeoutStr) + if err == nil { + timeout = time.Duration(timeoutInMilliseconds) * time.Millisecond + } + } + ocspClient := &http.Client{ + Timeout: timeout, + Transport: snowflakeInsecureTransport, + } + ret, ocspStatus := checkOCSPCacheServer(context.TODO(), ocspClient, http.NewRequest, u, timeout) + if ocspStatus.code != ocspSuccess { + return + } + + ocspResponseCacheLock.Lock() + for k, cacheValue := range *ret { + status := extractOCSPCacheResponseValueWithoutSubject(cacheValue) + if !isValidOCSPStatus(status.code) { + continue + } + cacheKey := encodeCertIDKey(k) + ocspResponseCache[*cacheKey] = cacheValue + } + cacheUpdated = true + ocspResponseCacheLock.Unlock() +} + +func getAllRevocationStatus(ctx context.Context, verifiedChains []*x509.Certificate) []*ocspStatus { + cached := validateWithCacheForAllCertificates(verifiedChains) + if !cached { + downloadOCSPCacheServer() + } + n := len(verifiedChains) - 1 + results := make([]*ocspStatus, n) + for j := 0; j < n; j++ { + results[j] = getRevocationStatus(ctx, verifiedChains[j], verifiedChains[j+1]) + if !isValidOCSPStatus(results[j].code) { + return results + } + } + return results +} + +// verifyPeerCertificateSerial verifies the certificate revocation status in serial. +func verifyPeerCertificateSerial(_ [][]byte, verifiedChains [][]*x509.Certificate) (err error) { + overrideCacheDir() + return verifyPeerCertificate(context.TODO(), verifiedChains) +} + +func overrideCacheDir() { + if os.Getenv(cacheDirEnv) != "" { + ocspResponseCacheLock.Lock() + defer ocspResponseCacheLock.Unlock() + createOCSPCacheDir() + } +} + +// initOCSPCache initializes OCSP Response cache file. 
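The cache-server download above is controlled entirely by environment variables; a sketch of the knobs (values hypothetical):

```go
package gosnowflake

import "os"

// Sketch only: disabling the cache server forces per-certificate calls
// to the OCSP responders; the directory override relocates
// ocsp_response_cache.json.
func exampleOCSPCacheConfig() {
	os.Setenv(cacheServerEnabledEnv, "false") // SF_OCSP_RESPONSE_CACHE_SERVER_ENABLED
	os.Setenv(cacheDirEnv, "/tmp/sf-ocsp")    // SF_OCSP_RESPONSE_CACHE_DIR
	// downloadOCSPCacheServer now returns early, and createOCSPCacheDir
	// places the cache file under /tmp/sf-ocsp.
}
```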
+func initOCSPCache() { + ocspResponseCache = make(map[certIDKey][]interface{}) + ocspResponseCacheLock = &sync.RWMutex{} + + glog.V(2).Infof("reading OCSP Response cache file. %v\n", cacheFileName) + f, err := os.Open(cacheFileName) + if err != nil { + glog.Infof("failed to open. Ignored. %v\n", err) + return + } + defer f.Close() + + buf := make(map[string][]interface{}) + + r := bufio.NewReader(f) + dec := json.NewDecoder(r) + for { + if err := dec.Decode(&buf); err == io.EOF { + break + } else if err != nil { + glog.V(2).Infof("failed to read. Ignored. %v\n", err) + return + } + } + for k, cacheValue := range buf { + status := extractOCSPCacheResponseValueWithoutSubject(cacheValue) + if !isValidOCSPStatus(status.code) { + continue + } + cacheKey := encodeCertIDKey(k) + ocspResponseCache[*cacheKey] = cacheValue + + } + cacheUpdated = false +} +func extractOCSPCacheResponseValueWithoutSubject(cacheValue []interface{}) *ocspStatus { + return extractOCSPCacheResponseValue(cacheValue, nil, nil) +} + +func extractOCSPCacheResponseValue(cacheValue []interface{}, subject, issuer *x509.Certificate) *ocspStatus { + subjectName := "Unknown" + if subject != nil { + subjectName = subject.Subject.CommonName + } + + curTime := time.Now() + if len(cacheValue) != 2 { + return &ocspStatus{ + code: ocspMissedCache, + err: fmt.Errorf("miss cache data. subject: %v", subjectName), + } + } + if ts, ok := cacheValue[0].(float64); ok { + currentTime := float64(curTime.UTC().Unix()) + if currentTime-ts >= cacheExpire { + return &ocspStatus{ + code: ocspCacheExpired, + err: fmt.Errorf("cache expired. current: %v, cache: %v", + time.Unix(int64(currentTime), 0).UTC(), time.Unix(int64(ts), 0).UTC()), + } + } + } else { + return &ocspStatus{ + code: ocspFailedDecodeResponse, + err: errors.New("the first cache element is not float64"), + } + } + var err error + var r *ocsp.Response + if s, ok := cacheValue[1].(string); ok { + var b []byte + b, err = base64.StdEncoding.DecodeString(s) + if err != nil { + return &ocspStatus{ + code: ocspFailedDecodeResponse, + err: fmt.Errorf("failed to decode OCSP Response value in a cache. subject: %v, err: %v", subjectName, err), + } + } + // check the revocation status here + r, err = ocsp.ParseResponse(b, issuer) + if err != nil { + glog.V(2).Infof("the second cache element is not a valid OCSP Response. Ignored. subject: %v\n", subjectName) + return &ocspStatus{ + code: ocspFailedParseResponse, + err: fmt.Errorf("failed to parse OCSP Respose. subject: %v, err: %v", subjectName, err), + } + } + } else { + return &ocspStatus{ + code: ocspFailedDecodeResponse, + err: errors.New("the second cache element is not string"), + } + + } + return validateOCSP(r) +} + +// writeOCSPCacheFile writes a OCSP Response cache file. This is called if all revocation status is success. +// lock file is used to mitigate race condition with other process. +func writeOCSPCacheFile() { + glog.V(2).Infof("writing OCSP Response cache file. %v\n", cacheFileName) + cacheLockFileName := cacheFileName + ".lck" + err := os.Mkdir(cacheLockFileName, 0600) + switch { + case os.IsExist(err): + statinfo, err := os.Stat(cacheLockFileName) + if err != nil { + glog.V(2).Infof("failed to write OCSP response cache file. file: %v, err: %v. ignored.\n", cacheFileName, err) + return + } + if time.Since(statinfo.ModTime()) < 15*time.Minute { + glog.V(2).Infof("other process locks the cache file. %v. 
ignored.\n", cacheFileName)
+			return
+		}
+		err = os.Remove(cacheLockFileName)
+		if err != nil {
+			glog.V(2).Infof("failed to delete lock file. file: %v, err: %v. ignored.\n", cacheLockFileName, err)
+			return
+		}
+		err = os.Mkdir(cacheLockFileName, 0600)
+		if err != nil {
+			glog.V(2).Infof("failed to create lock file. file: %v, err: %v. ignored.\n", cacheLockFileName, err)
+			return
+		}
+	}
+	defer os.RemoveAll(cacheLockFileName)
+
+	buf := make(map[string][]interface{})
+	for k, v := range ocspResponseCache {
+		cacheKeyInBase64 := decodeCertIDKey(&k)
+		buf[cacheKeyInBase64] = v
+	}
+
+	j, err := json.Marshal(buf)
+	if err != nil {
+		glog.V(2).Info("failed to convert OCSP Response cache to JSON. ignored.")
+		return
+	}
+	err = ioutil.WriteFile(cacheFileName, j, 0644)
+	if err != nil {
+		glog.V(2).Infof("failed to write OCSP Response cache. err: %v. ignored.\n", err)
+	}
+}
+
+// readCACerts reads the set of root CAs
+func readCACerts() {
+	raw := []byte(caRootPEM)
+	certPool = x509.NewCertPool()
+	caRoot = make(map[string]*x509.Certificate)
+	var p *pem.Block
+	for {
+		p, raw = pem.Decode(raw)
+		if p == nil {
+			break
+		}
+		if p.Type != "CERTIFICATE" {
+			continue
+		}
+		c, err := x509.ParseCertificate(p.Bytes)
+		if err != nil {
+			panic("failed to parse CA certificate.")
+		}
+		certPool.AddCert(c)
+		caRoot[string(c.RawSubject)] = c
+	}
+}
+
+// createOCSPCacheDir creates the OCSP response cache directory and sets the cache file name.
+func createOCSPCacheDir() {
+	cacheDir = os.Getenv(cacheDirEnv)
+	if cacheDir == "" {
+		cacheDir = os.Getenv("SNOWFLAKE_TEST_WORKSPACE")
+	}
+	if cacheDir == "" {
+		switch runtime.GOOS {
+		case "windows":
+			cacheDir = filepath.Join(os.Getenv("USERPROFILE"), "AppData", "Local", "Snowflake", "Caches")
+		case "darwin":
+			home := os.Getenv("HOME")
+			if home == "" {
+				glog.V(2).Info("HOME is blank.")
+			}
+			cacheDir = filepath.Join(home, "Library", "Caches", "Snowflake")
+		default:
+			home := os.Getenv("HOME")
+			if home == "" {
+				glog.V(2).Info("HOME is blank")
+			}
+			cacheDir = filepath.Join(home, ".cache", "snowflake")
+		}
+	}
+
+	if _, err := os.Stat(cacheDir); os.IsNotExist(err) {
+		err := os.MkdirAll(cacheDir, os.ModePerm)
+		if err != nil {
+			glog.V(2).Infof("failed to create cache directory. %v, err: %v. ignored\n", cacheDir, err)
+		}
+	}
+	cacheFileName = filepath.Join(cacheDir, cacheFileBaseName)
+	glog.V(2).Infof("reset OCSP cache file. %v", cacheFileName)
+}
+
+// deleteOCSPCacheFile deletes the OCSP response cache file
+func deleteOCSPCacheFile() {
+	_ = os.Remove(cacheFileName)
+}
+
+func init() {
+	readCACerts()
+	createOCSPCacheDir()
+	initOCSPCache()
+}
+
+// snowflakeInsecureTransport is the transport object that doesn't do certificate revocation checks.
+var snowflakeInsecureTransport = &http.Transport{
+	MaxIdleConns:    10,
+	IdleConnTimeout: 30 * time.Minute,
+	Proxy:           http.ProxyFromEnvironment,
+	DialContext: (&net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+	}).DialContext,
+}
+
+// SnowflakeTransport includes the certificate revocation check with OCSP, performed sequentially. By default, the driver uses
+// this transport object.
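Any HTTP client built on the SnowflakeTransport declared just below gets OCSP revocation checking on each TLS handshake; a sketch:

```go
package gosnowflake

import (
	"net/http"
	"time"
)

// Sketch only: a client that performs OCSP checking via
// verifyPeerCertificateSerial on every new TLS connection.
func exampleCheckedClient() *http.Client {
	return &http.Client{
		Timeout:   60 * time.Second, // hypothetical overall timeout
		Transport: SnowflakeTransport,
	}
}
```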
+var SnowflakeTransport = &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: certPool, + VerifyPeerCertificate: verifyPeerCertificateSerial, + }, + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Minute, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, +} + +// SnowflakeTransportTest includes the certificate revocation check in parallel +var SnowflakeTransportTest = SnowflakeTransport diff --git a/vendor/github.com/snowflakedb/gosnowflake/parameters.json.tmpl b/vendor/github.com/snowflakedb/gosnowflake/parameters.json.tmpl new file mode 100644 index 000000000000..8448f605d5e8 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/parameters.json.tmpl @@ -0,0 +1,11 @@ +{ + "testconnection": { + "SNOWFLAKE_TEST_USER": "testuser", + "SNOWFLAKE_TEST_PASSWORD": "testpass", + "SNOWFLAKE_TEST_ACCOUNT": "testaccount", + "SNOWFLAKE_TEST_WAREHOUSE": "testwarehouse", + "SNOWFLAKE_TEST_DATABASE": "testdatabase", + "SNOWFLAKE_TEST_SCHEMA": "testschema", + "SNOWFLAKE_TEST_ROLE": "testrole" + } +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/priv_key_test_coding_helper.go b/vendor/github.com/snowflakedb/gosnowflake/priv_key_test_coding_helper.go new file mode 100644 index 000000000000..06f0593a8d60 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/priv_key_test_coding_helper.go @@ -0,0 +1,34 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. +// +build go1.10 + +package gosnowflake + +// This file contains coding and decoding functions for rsa private key +// Only golang with version of 1.10 or upper should support this + +import ( + "crypto/rsa" + "crypto/x509" +) + +func parsePKCS8PrivateKey(block []byte) (*rsa.PrivateKey, error) { + privKey, err := x509.ParsePKCS8PrivateKey(block) + if err != nil { + return nil, &SnowflakeError{ + Number: ErrCodePrivateKeyParseError, + Message: "Error decoding private key using PKCS8.", + } + } + return privKey.(*rsa.PrivateKey), nil +} + +func marshalPKCS8PrivateKey(key *rsa.PrivateKey) ([]byte, error) { + keyInBytes, err := x509.MarshalPKCS8PrivateKey(key) + if err != nil { + return nil, &SnowflakeError{ + Number: ErrCodePrivateKeyParseError, + Message: "Error encoding private key using PKCS8."} + } + return keyInBytes, nil + +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/query.go b/vendor/github.com/snowflakedb/gosnowflake/query.go new file mode 100644 index 000000000000..7f70ff0bddc0 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/query.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. 
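The PKCS#8 helpers above round-trip an *rsa.PrivateKey through DER; the privateKey DSN parameter seen earlier expects those DER bytes encoded with base64.URLEncoding. A sketch:

```go
package gosnowflake

import (
	"crypto/rand"
	"crypto/rsa"
)

// Sketch only: round-trip through the PKCS#8 helpers.
func examplePKCS8RoundTrip() (*rsa.PrivateKey, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	der, err := marshalPKCS8PrivateKey(key) // PKCS#8 DER bytes
	if err != nil {
		return nil, err
	}
	return parsePKCS8PrivateKey(der) // back to *rsa.PrivateKey
}
```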
+ +package gosnowflake + +import ( + "time" +) + +const arrowFormat = "arrow" + +type execBindParameter struct { + Type string `json:"type"` + Value interface{} `json:"value"` +} + +type execRequest struct { + SQLText string `json:"sqlText"` + AsyncExec bool `json:"asyncExec"` + SequenceID uint64 `json:"sequenceId"` + IsInternal bool `json:"isInternal"` + Parameters map[string]interface{} `json:"parameters,omitempty"` + Bindings map[string]execBindParameter `json:"bindings,omitempty"` +} + +type execResponseRowType struct { + Name string `json:"name"` + ByteLength int64 `json:"byteLength"` + Length int64 `json:"length"` + Type string `json:"type"` + Precision int64 `json:"precision"` + Scale int64 `json:"scale"` + Nullable bool `json:"nullable"` +} + +type execResponseChunk struct { + URL string `json:"url"` + RowCount int `json:"rowCount"` + UncompressedSize int64 `json:"uncompressedSize"` + CompressedSize int64 `json:"compressedSize"` +} + +// make all data field optional +type execResponseData struct { + // succeed query response data + Parameters []nameValueParameter `json:"parameters,omitempty"` + RowType []execResponseRowType `json:"rowtype,omitempty"` + RowSet [][]*string `json:"rowset,omitempty"` + RowSetBase64 string `json:"rowsetbase64,omitempty"` + Total int64 `json:"total,omitempty"` // java:long + Returned int64 `json:"returned,omitempty"` // java:long + QueryID string `json:"queryId,omitempty"` + SQLState string `json:"sqlState,omitempty"` + DatabaseProvider string `json:"databaseProvider,omitempty"` + FinalDatabaseName string `json:"finalDatabaseName,omitempty"` + FinalSchemaName string `json:"finalSchemaName,omitempty"` + FinalWarehouseName string `json:"finalWarehouseName,omitempty"` + FinalRoleName string `json:"finalRoleName,omitempty"` + NumberOfBinds int `json:"numberOfBinds,omitempty"` // java:int + StatementTypeID int64 `json:"statementTypeId,omitempty"` // java:long + Version int64 `json:"version,omitempty"` // java:long + Chunks []execResponseChunk `json:"chunks,omitempty"` + Qrmk string `json:"qrmk,omitempty"` + ChunkHeaders map[string]string `json:"chunkHeaders,omitempty"` + + // ping pong response data + GetResultURL string `json:"getResultUrl,omitempty"` + ProgressDesc string `json:"progressDesc,omitempty"` + QueryAbortTimeout time.Duration `json:"queryAbortsAfterSecs,omitempty"` + ResultIDs string `json:"resultIds,omitempty"` + ResultTypes string `json:"resultTypes,omitempty"` + QueryResultFormat string `json:"queryResultFormat,omitempty"` +} + +type execResponse struct { + Data execResponseData `json:"Data"` + Message string `json:"message"` + Code string `json:"code"` + Success bool `json:"success"` +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/release.go b/vendor/github.com/snowflakedb/gosnowflake/release.go new file mode 100644 index 000000000000..76f339fb69d2 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/release.go @@ -0,0 +1,5 @@ +// +build !sfdebug + +package gosnowflake + +func debugPanicf(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/snowflakedb/gosnowflake/restful.go b/vendor/github.com/snowflakedb/gosnowflake/restful.go new file mode 100644 index 000000000000..1f9d4e443059 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/restful.go @@ -0,0 +1,449 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. 
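For reference, the wire shape produced by the execRequest type above (SQL text and binding hypothetical):

```go
package gosnowflake

import (
	"encoding/json"
	"fmt"
)

// Sketch only: marshaling execRequest with one bind parameter.
func exampleExecRequestJSON() {
	req := execRequest{
		SQLText:    "select * from t where c = ?",
		SequenceID: 1,
		Bindings: map[string]execBindParameter{
			"1": {Type: "TEXT", Value: "foo"},
		},
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// {"sqlText":"select * from t where c = ?","asyncExec":false,
	//  "sequenceId":1,"isInternal":false,
	//  "bindings":{"1":{"type":"TEXT","value":"foo"}}}
}
```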
+ +package gosnowflake + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/google/uuid" +) + +// HTTP headers +const ( + headerSnowflakeToken = "Snowflake Token=\"%v\"" + headerAuthorizationKey = "Authorization" + + headerContentTypeApplicationJSON = "application/json" + headerAcceptTypeApplicationSnowflake = "application/snowflake" +) + +// Snowflake Server Error code +const ( + sessionExpiredCode = "390112" + queryInProgressCode = "333333" + queryInProgressAsyncCode = "333334" +) + +// Snowflake Server Endpoints +const ( + loginRequestPath = "/session/v1/login-request" + queryRequestPath = "/queries/v1/query-request" + tokenRequestPath = "/session/token-request" + abortRequestPath = "/queries/v1/abort-request" + authenticatorRequestPath = "/session/authenticator-request" + sessionRequestPath = "/session" + heartBeatPath = "/session/heartbeat" +) + +type snowflakeRestful struct { + Host string + Port int + Protocol string + LoginTimeout time.Duration // Login timeout + RequestTimeout time.Duration // request timeout + + Client *http.Client + Token string + MasterToken string + SessionID int + HeartBeat *heartbeat + + Connection *snowflakeConn + FuncPostQuery func(context.Context, *snowflakeRestful, *url.Values, map[string]string, []byte, time.Duration, *uuid.UUID) (*execResponse, error) + FuncPostQueryHelper func(context.Context, *snowflakeRestful, *url.Values, map[string]string, []byte, time.Duration, *uuid.UUID) (*execResponse, error) + FuncPost func(context.Context, *snowflakeRestful, *url.URL, map[string]string, []byte, time.Duration, bool) (*http.Response, error) + FuncGet func(context.Context, *snowflakeRestful, *url.URL, map[string]string, time.Duration) (*http.Response, error) + FuncRenewSession func(context.Context, *snowflakeRestful, time.Duration) error + FuncPostAuth func(context.Context, *snowflakeRestful, *url.Values, map[string]string, []byte, time.Duration) (*authResponse, error) + FuncCloseSession func(context.Context, *snowflakeRestful, time.Duration) error + FuncCancelQuery func(context.Context, *snowflakeRestful, *uuid.UUID, time.Duration) error + + FuncPostAuthSAML func(context.Context, *snowflakeRestful, map[string]string, []byte, time.Duration) (*authResponse, error) + FuncPostAuthOKTA func(context.Context, *snowflakeRestful, map[string]string, []byte, string, time.Duration) (*authOKTAResponse, error) + FuncGetSSO func(context.Context, *snowflakeRestful, *url.Values, map[string]string, string, time.Duration) ([]byte, error) +} + +func (sr *snowflakeRestful) getURL() *url.URL { + return &url.URL{ + Scheme: sr.Protocol, + Host: sr.Host + ":" + strconv.Itoa(sr.Port), + } +} + +func (sr *snowflakeRestful) getFullURL(path string, params *url.Values) *url.URL { + ret := &url.URL{ + Scheme: sr.Protocol, + Host: sr.Host + ":" + strconv.Itoa(sr.Port), + Path: path, + } + if params != nil { + ret.RawQuery = params.Encode() + } + return ret +} + +type renewSessionResponse struct { + Data renewSessionResponseMain `json:"data"` + Message string `json:"message"` + Code string `json:"code"` + Success bool `json:"success"` +} + +type renewSessionResponseMain struct { + SessionToken string `json:"sessionToken"` + ValidityInSecondsST time.Duration `json:"validityInSecondsST"` + MasterToken string `json:"masterToken"` + ValidityInSecondsMT time.Duration `json:"validityInSecondsMT"` + SessionID int `json:"sessionId"` +} + +type cancelQueryResponse struct { + Data interface{} `json:"data"` + Message string 
`json:"message"` + Code string `json:"code"` + Success bool `json:"success"` +} + +func postRestful( + ctx context.Context, + sr *snowflakeRestful, + fullURL *url.URL, + headers map[string]string, + body []byte, + timeout time.Duration, + raise4XX bool) ( + *http.Response, error) { + return newRetryHTTP( + ctx, sr.Client, http.NewRequest, fullURL, headers, timeout).doPost().setBody(body).doRaise4XX(raise4XX).execute() +} + +func getRestful( + ctx context.Context, + sr *snowflakeRestful, + fullURL *url.URL, + headers map[string]string, + timeout time.Duration) ( + *http.Response, error) { + return newRetryHTTP( + ctx, sr.Client, http.NewRequest, fullURL, headers, timeout).execute() +} + +func postRestfulQuery( + ctx context.Context, + sr *snowflakeRestful, + params *url.Values, + headers map[string]string, + body []byte, + timeout time.Duration, + requestID *uuid.UUID) ( + data *execResponse, err error) { + + data, err = sr.FuncPostQueryHelper(ctx, sr, params, headers, body, timeout, requestID) + + // errors other than context timeout and cancel would be returned to upper layers + if err != context.Canceled && err != context.DeadlineExceeded { + return data, err + } + + err = sr.FuncCancelQuery(context.TODO(), sr, requestID, timeout) + if err != nil { + return nil, err + } + return nil, ctx.Err() +} + +func postRestfulQueryHelper( + ctx context.Context, + sr *snowflakeRestful, + params *url.Values, + headers map[string]string, + body []byte, + timeout time.Duration, + requestID *uuid.UUID) ( + data *execResponse, err error) { + glog.V(2).Infof("params: %v", params) + params.Add(requestIDKey, requestID.String()) + params.Add("clientStartTime", strconv.FormatInt(time.Now().Unix(), 10)) + params.Add(requestGUIDKey, uuid.New().String()) + if sr.Token != "" { + headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.Token) + } + fullURL := sr.getFullURL(queryRequestPath, params) + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, body, timeout, false) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { + glog.V(2).Infof("postQuery: resp: %v", resp) + var respd execResponse + err = json.NewDecoder(resp.Body).Decode(&respd) + if err != nil { + glog.V(1).Infof("failed to decode JSON. err: %v", err) + glog.Flush() + return nil, err + } + if respd.Code == sessionExpiredCode { + err = sr.FuncRenewSession(ctx, sr, timeout) + if err != nil { + return nil, err + } + return sr.FuncPostQuery(ctx, sr, params, headers, body, timeout, requestID) + } + + var resultURL string + isSessionRenewed := false + + for isSessionRenewed || respd.Code == queryInProgressCode || + respd.Code == queryInProgressAsyncCode { + if !isSessionRenewed { + resultURL = respd.Data.GetResultURL + } + + glog.V(2).Info("ping pong") + glog.Flush() + headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.Token) + fullURL := sr.getFullURL(resultURL, nil) + + resp, err = sr.FuncGet(ctx, sr, fullURL, headers, timeout) + if err != nil { + glog.V(1).Infof("failed to get response. err: %v", err) + glog.Flush() + return nil, err + } + respd = execResponse{} // reset the response + err = json.NewDecoder(resp.Body).Decode(&respd) + resp.Body.Close() + if err != nil { + glog.V(1).Infof("failed to decode JSON. 
err: %v", err) + glog.Flush() + return nil, err + } + if respd.Code == sessionExpiredCode { + err = sr.FuncRenewSession(ctx, sr, timeout) + if err != nil { + return nil, err + } + isSessionRenewed = true + } else { + isSessionRenewed = false + } + } + return &respd, nil + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) + return nil, err + } + glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) + glog.V(1).Infof("Header: %v", resp.Header) + glog.Flush() + return nil, &SnowflakeError{ + Number: ErrFailedToPostQuery, + SQLState: SQLStateConnectionFailure, + Message: errMsgFailedToPostQuery, + MessageArgs: []interface{}{resp.StatusCode, fullURL}, + } +} + +func closeSession(ctx context.Context, sr *snowflakeRestful, timeout time.Duration) error { + glog.V(2).Info("close session") + params := &url.Values{} + params.Add("delete", "true") + params.Add(requestIDKey, uuid.New().String()) + params.Add(requestGUIDKey, uuid.New().String()) + fullURL := sr.getFullURL(sessionRequestPath, params) + + headers := make(map[string]string) + headers["Content-Type"] = headerContentTypeApplicationJSON + headers["accept"] = headerAcceptTypeApplicationSnowflake + headers["User-Agent"] = userAgent + headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.Token) + + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, nil, 5*time.Second, false) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { + var respd renewSessionResponse + err = json.NewDecoder(resp.Body).Decode(&respd) + if err != nil { + glog.V(1).Infof("failed to decode JSON. err: %v", err) + glog.Flush() + return err + } + if !respd.Success && respd.Code != sessionExpiredCode { + c, err := strconv.Atoi(respd.Code) + if err != nil { + return err + } + return &SnowflakeError{ + Number: c, + Message: respd.Message, + } + } + return nil + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + glog.V(1).Infof("failed to extract HTTP response body. 
err: %v", err) + glog.Flush() + return err + } + glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) + glog.V(1).Infof("Header: %v", resp.Header) + glog.Flush() + return &SnowflakeError{ + Number: ErrFailedToCloseSession, + SQLState: SQLStateConnectionFailure, + Message: errMsgFailedToCloseSession, + MessageArgs: []interface{}{resp.StatusCode, fullURL}, + } +} + +func renewRestfulSession(ctx context.Context, sr *snowflakeRestful, timeout time.Duration) error { + glog.V(2).Info("start renew session") + params := &url.Values{} + params.Add(requestIDKey, uuid.New().String()) + params.Add(requestGUIDKey, uuid.New().String()) + fullURL := sr.getFullURL(tokenRequestPath, params) + + headers := make(map[string]string) + headers["Content-Type"] = headerContentTypeApplicationJSON + headers["accept"] = headerAcceptTypeApplicationSnowflake + headers["User-Agent"] = userAgent + headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.MasterToken) + + body := make(map[string]string) + body["oldSessionToken"] = sr.Token + body["requestType"] = "RENEW" + + var reqBody []byte + reqBody, err := json.Marshal(body) + if err != nil { + return err + } + + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, reqBody, timeout, false) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { + var respd renewSessionResponse + err = json.NewDecoder(resp.Body).Decode(&respd) + if err != nil { + glog.V(1).Infof("failed to decode JSON. err: %v", err) + glog.Flush() + return err + } + if !respd.Success { + c, err := strconv.Atoi(respd.Code) + if err != nil { + return err + } + return &SnowflakeError{ + Number: c, + Message: respd.Message, + } + } + sr.Token = respd.Data.SessionToken + sr.MasterToken = respd.Data.MasterToken + return nil + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) + glog.Flush() + return err + } + glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) + glog.V(1).Infof("Header: %v", resp.Header) + glog.Flush() + return &SnowflakeError{ + Number: ErrFailedToRenewSession, + SQLState: SQLStateConnectionFailure, + Message: errMsgFailedToRenew, + MessageArgs: []interface{}{resp.StatusCode, fullURL}, + } +} + +func cancelQuery(ctx context.Context, sr *snowflakeRestful, requestID *uuid.UUID, timeout time.Duration) error { + glog.V(2).Info("cancel query") + params := &url.Values{} + params.Add(requestIDKey, uuid.New().String()) + params.Add(requestGUIDKey, uuid.New().String()) + + fullURL := sr.getFullURL(abortRequestPath, params) + + headers := make(map[string]string) + headers["Content-Type"] = headerContentTypeApplicationJSON + headers["accept"] = headerAcceptTypeApplicationSnowflake + headers["User-Agent"] = userAgent + headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.Token) + + req := make(map[string]string) + req[requestIDKey] = requestID.String() + + reqByte, err := json.Marshal(req) + if err != nil { + return err + } + + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, reqByte, timeout, false) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { + var respd cancelQueryResponse + err = json.NewDecoder(resp.Body).Decode(&respd) + if err != nil { + glog.V(1).Infof("failed to decode JSON. 
err: %v", err) + glog.Flush() + return err + } + if !respd.Success && respd.Code == sessionExpiredCode { + err := sr.FuncRenewSession(ctx, sr, timeout) + if err != nil { + return err + } + return sr.FuncCancelQuery(ctx, sr, requestID, timeout) + } else if respd.Success { + return nil + } else { + c, err := strconv.Atoi(respd.Code) + if err != nil { + return err + } + return &SnowflakeError{ + Number: c, + Message: respd.Message, + } + } + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) + glog.Flush() + return err + } + glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) + glog.V(1).Infof("Header: %v", resp.Header) + glog.Flush() + return &SnowflakeError{ + Number: ErrFailedToCancelQuery, + SQLState: SQLStateConnectionFailure, + Message: errMsgFailedToCancelQuery, + MessageArgs: []interface{}{resp.StatusCode, fullURL}, + } +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/result.go b/vendor/github.com/snowflakedb/gosnowflake/result.go new file mode 100644 index 000000000000..6e271fca814b --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/result.go @@ -0,0 +1,26 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +// SnowflakeResult provides the associated query ID +type SnowflakeResult interface { + QueryID() string +} + +type snowflakeResult struct { + affectedRows int64 + insertID int64 // Snowflake doesn't support last insert id + queryID string +} + +func (res *snowflakeResult) LastInsertId() (int64, error) { + return res.insertID, nil +} + +func (res *snowflakeResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} + +func (res *snowflakeResult) QueryID() string { + return res.queryID +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/retry.go b/vendor/github.com/snowflakedb/gosnowflake/retry.go new file mode 100644 index 000000000000..d8ca92b359a3 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/retry.go @@ -0,0 +1,318 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "bytes" + "crypto/x509" + "fmt" + "github.com/google/uuid" + "io" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "context" + + "sync" +) + +var random *rand.Rand + +func init() { + random = rand.New(rand.NewSource(time.Now().UnixNano())) +} + +// requestGUIDKey is attached to every request against Snowflake +const requestGUIDKey string = "request_guid" + +// retryCounterKey is attached to query-request from the second time +const retryCounterKey string = "retryCounter" + +// requestIDKey is attached to all requests to Snowflake +const requestIDKey string = "requestId" + +// This class takes in an url during construction and replace the +// value of request_guid every time the replace() is called +// When the url does not contain request_guid, just return the original +// url +type requestGUIDReplacer interface { + // replace the url with new ID + replace() *url.URL +} + +// Make requestGUIDReplacer given a url string +func newRequestGUIDReplace(urlPtr *url.URL) requestGUIDReplacer { + values, err := url.ParseQuery(urlPtr.RawQuery) + if err != nil { + // nop if invalid query parameters + return &transientReplace{urlPtr} + } + if len(values.Get(requestGUIDKey)) == 0 { + // nop if no request_guid is included. 
+ return &transientReplace{urlPtr} + } + + return &requestGUIDReplace{urlPtr, values} +} + +// this replacer is a no-op: it returns the URL unchanged +type transientReplace struct { + urlPtr *url.URL +} + +func (replacer *transientReplace) replace() *url.URL { + return replacer.urlPtr +} + +/* +requestGUIDReplacer is a one-shot object that is created out of the retry loop and +called with replace to change the request_guid's value upon every retry +*/ +type requestGUIDReplace struct { + urlPtr *url.URL + urlValues url.Values +} + +/** +This function replaces the value of the requestGUIDKey in a URL with a newly +generated UUID +*/ +func (replacer *requestGUIDReplace) replace() *url.URL { + replacer.urlValues.Del(requestGUIDKey) + replacer.urlValues.Add(requestGUIDKey, uuid.New().String()) + replacer.urlPtr.RawQuery = replacer.urlValues.Encode() + return replacer.urlPtr +} + +type retryCounterUpdater interface { + replaceOrAdd(retry int) *url.URL +} + +type retryCounterUpdate struct { + urlPtr *url.URL + urlValues url.Values +} + +// this replacer is a no-op: it returns the URL unchanged +type transientReplaceOrAdd struct { + urlPtr *url.URL +} + +func (replaceOrAdder *transientReplaceOrAdd) replaceOrAdd(retry int) *url.URL { + return replaceOrAdder.urlPtr +} + +func (replacer *retryCounterUpdate) replaceOrAdd(retry int) *url.URL { + replacer.urlValues.Del(retryCounterKey) + replacer.urlValues.Add(retryCounterKey, strconv.Itoa(retry)) + replacer.urlPtr.RawQuery = replacer.urlValues.Encode() + return replacer.urlPtr +} + +func newRetryUpdate(urlPtr *url.URL) retryCounterUpdater { + if !strings.HasPrefix(urlPtr.Path, queryRequestPath) { + // nop if not query-request + return &transientReplaceOrAdd{urlPtr} + } + values, err := url.ParseQuery(urlPtr.RawQuery) + if err != nil { + // nop if the URL is not valid + return &transientReplaceOrAdd{urlPtr} + } + return &retryCounterUpdate{urlPtr, values} +} + +type waitAlgo struct { + mutex *sync.Mutex // required for random.Int63n + base time.Duration // base wait time + cap time.Duration // maximum wait time +} + +func randSecondDuration(n time.Duration) time.Duration { + return time.Duration(random.Int63n(int64(n/time.Second))) * time.Second +} + +// decorrelated jitter backoff +func (w *waitAlgo) decorr(attempt int, sleep time.Duration) time.Duration { + w.mutex.Lock() + defer w.mutex.Unlock() + t := 3*sleep - w.base + switch { + case t > 0: + return durationMin(w.cap, randSecondDuration(t)+w.base) + case t < 0: + return durationMin(w.cap, randSecondDuration(-t)+3*sleep) + } + return w.base +} + +var defaultWaitAlgo = &waitAlgo{ + mutex: &sync.Mutex{}, + base: 5 * time.Second, + cap: 160 * time.Second, +} + +type requestFunc func(method, urlStr string, body io.Reader) (*http.Request, error) + +type clientInterface interface { + Do(req *http.Request) (*http.Response, error) +} + +type retryHTTP struct { + ctx context.Context + client clientInterface + req requestFunc + method string + fullURL *url.URL + headers map[string]string + body []byte + timeout time.Duration + raise4XX bool +} + +func newRetryHTTP(ctx context.Context, + client clientInterface, + req requestFunc, + fullURL *url.URL, + headers map[string]string, + timeout time.Duration) *retryHTTP { + instance := retryHTTP{} + instance.ctx = ctx + instance.client = client + instance.req = req + instance.method = "GET" + instance.fullURL = fullURL + instance.headers = headers + instance.body = nil + instance.timeout = timeout + instance.raise4XX = false + return &instance +} + +func (r *retryHTTP)
doRaise4XX(raise4XX bool) *retryHTTP { + r.raise4XX = raise4XX + return r +} + +func (r *retryHTTP) doPost() *retryHTTP { + r.method = "POST" + return r +} + +func (r *retryHTTP) setBody(body []byte) *retryHTTP { + r.body = body + return r +} + +func (r *retryHTTP) execute() (res *http.Response, err error) { + totalTimeout := r.timeout + glog.V(2).Infof("retryHTTP.totalTimeout: %v", totalTimeout) + retryCounter := 0 + sleepTime := time.Duration(0) + + var rIDReplacer requestGUIDReplacer + var rUpdater retryCounterUpdater + + for { + req, err := r.req(r.method, r.fullURL.String(), bytes.NewReader(r.body)) + if err != nil { + return nil, err + } + if req != nil { + // req can be nil in tests + req = req.WithContext(r.ctx) + } + for k, v := range r.headers { + req.Header.Set(k, v) + } + res, err = r.client.Do(req) + if err != nil { + // check if it can retry. + doExit, err := r.isRetryableError(err) + if doExit { + return res, err + } + // do not give up immediately, as such errors can be sporadic; retrying often helps. + glog.V(2).Infof( + "failed http connection. no response is returned. err: %v. retrying...\n", err) + } else { + if res.StatusCode == http.StatusOK || r.raise4XX && res != nil && res.StatusCode >= 400 && res.StatusCode < 500 { + // exit on success, or + // abort the connection if the raise4XX flag is enabled and the HTTP status code is in the 4xx range. + // This is currently used for Snowflake login. The caller must generate an error object based on HTTP status. + break + } + glog.V(2).Infof( + "failed http connection. HTTP Status: %v. retrying...\n", res.StatusCode) + res.Body.Close() + } + // uses decorrelated jitter backoff + sleepTime = defaultWaitAlgo.decorr(retryCounter, sleepTime) + + if totalTimeout > 0 { + glog.V(2).Infof("to timeout: %v", totalTimeout) + // if any timeout is set + totalTimeout -= sleepTime + if totalTimeout <= 0 { + if err != nil { + return nil, err + } + if res != nil { + return nil, fmt.Errorf("timeout after %s. HTTP Status: %v. Hanging?", r.timeout, res.StatusCode) + } + return nil, fmt.Errorf("timeout after %s. Hanging?", r.timeout) + } + } + retryCounter++ + if rIDReplacer == nil { + rIDReplacer = newRequestGUIDReplace(r.fullURL) + } + r.fullURL = rIDReplacer.replace() + if rUpdater == nil { + rUpdater = newRetryUpdate(r.fullURL) + } + r.fullURL = rUpdater.replaceOrAdd(retryCounter) + glog.V(2).Infof("sleeping %v. to timeout: %v. 
retrying", sleepTime, totalTimeout) + + await := time.NewTimer(sleepTime) + select { + case <-await.C: + // retry the request + case <-r.ctx.Done(): + await.Stop() + return res, r.ctx.Err() + } + } + return res, err +} + +func (r *retryHTTP) isRetryableError(err error) (bool, error) { + urlError, isURLError := err.(*url.Error) + if isURLError { + // context cancel or timeout + if urlError.Err == context.DeadlineExceeded || urlError.Err == context.Canceled { + return true, urlError.Err + } + if driverError, ok := urlError.Err.(*SnowflakeError); ok { + // Certificate Revoked + if driverError.Number == ErrOCSPStatusRevoked { + return true, err + } + } + if _, ok := urlError.Err.(x509.CertificateInvalidError); ok { + // Certificate is invalid + return true, err + } + if _, ok := urlError.Err.(x509.UnknownAuthorityError); ok { + // Certificate is self-signed + return true, err + } + + } + return false, err +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/rows.go b/vendor/github.com/snowflakedb/gosnowflake/rows.go new file mode 100644 index 000000000000..b95bfa31ed22 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/rows.go @@ -0,0 +1,512 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "bufio" + "compress/gzip" + "context" + "database/sql/driver" + "encoding/json" + "github.com/apache/arrow/go/arrow/ipc" + "github.com/apache/arrow/go/arrow/memory" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "sync" + "time" +) + +const ( + headerSseCAlgorithm = "x-amz-server-side-encryption-customer-algorithm" + headerSseCKey = "x-amz-server-side-encryption-customer-key" + headerSseCAes = "AES256" +) + +var ( + // MaxChunkDownloadWorkers specifies the maximum number of goroutines used to download chunks + MaxChunkDownloadWorkers = 10 + + // CustomJSONDecoderEnabled has the chunk downloader use the custom JSON decoder to reduce memory footprint. + CustomJSONDecoderEnabled = false +) + +var ( + maxChunkDownloaderErrorCounter = 5 +) + +type snowflakeRows struct { + sc *snowflakeConn + RowType []execResponseRowType + ChunkDownloader *snowflakeChunkDownloader + queryID string +} + +func (rows *snowflakeRows) Close() (err error) { + glog.V(2).Infoln("Rows.Close") + return nil +} + +type snowflakeValue interface{} + +type chunkRowType struct { + RowSet []*string + ArrowRow []snowflakeValue +} + +type rowSetType struct { + RowType []execResponseRowType + JSON [][]*string + RowSetBase64 string +} + +type chunkError struct { + Index int + Error error +} + +type snowflakeChunkDownloader struct { + sc *snowflakeConn + ctx context.Context + Total int64 + TotalRowIndex int64 + CellCount int + CurrentChunk []chunkRowType + CurrentChunkIndex int + CurrentChunkSize int + ChunksMutex *sync.Mutex + ChunkMetas []execResponseChunk + Chunks map[int][]chunkRowType + ChunksChan chan int + ChunksError chan *chunkError + ChunksErrorCounter int + ChunksFinalErrors []*chunkError + Qrmk string + QueryResultFormat string + RowSet rowSetType + ChunkHeader map[string]string + CurrentIndex int + FuncDownload func(context.Context, *snowflakeChunkDownloader, int) + FuncDownloadHelper func(context.Context, *snowflakeChunkDownloader, int) error + FuncGet func(context.Context, *snowflakeChunkDownloader, string, map[string]string, time.Duration) (*http.Response, error) + DoneDownloadCond *sync.Cond + NextDownloader *snowflakeChunkDownloader +} + +// ColumnTypeDatabaseTypeName returns the database column name. 
+func (rows *snowflakeRows) ColumnTypeDatabaseTypeName(index int) string { + return strings.ToUpper(rows.RowType[index].Type) +} + +// ColumnTypeLength returns the length of the column +func (rows *snowflakeRows) ColumnTypeLength(index int) (length int64, ok bool) { + if index < 0 || index > len(rows.RowType) { + return 0, false + } + switch rows.RowType[index].Type { + case "text", "variant", "object", "array", "binary": + return rows.RowType[index].Length, true + } + return 0, false +} + +func (rows *snowflakeRows) ColumnTypeNullable(index int) (nullable, ok bool) { + if index < 0 || index > len(rows.RowType) { + return false, false + } + return rows.RowType[index].Nullable, true +} + +func (rows *snowflakeRows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + if index < 0 || index > len(rows.RowType) { + return 0, 0, false + } + switch rows.RowType[index].Type { + case "fixed": + return rows.RowType[index].Precision, rows.RowType[index].Scale, true + case "time": + return rows.RowType[index].Scale, 0, true + case "timestamp": + return rows.RowType[index].Scale, 0, true + } + return 0, 0, false +} + +func (rows *snowflakeRows) Columns() []string { + glog.V(3).Infoln("Rows.Columns") + ret := make([]string, len(rows.RowType)) + for i, n := 0, len(rows.RowType); i < n; i++ { + ret[i] = rows.RowType[i].Name + } + return ret +} + +func (rows *snowflakeRows) ColumnTypeScanType(index int) reflect.Type { + return snowflakeTypeToGo(rows.RowType[index].Type, rows.RowType[index].Scale) +} + +func (rows *snowflakeRows) QueryID() string { + return rows.queryID +} + +func (rows *snowflakeRows) Next(dest []driver.Value) (err error) { + row, err := rows.ChunkDownloader.Next() + if err != nil { + // includes io.EOF + if err == io.EOF { + rows.ChunkDownloader.Chunks = nil // detach all chunks. No way to go backward without reinitialize it. 
+ } + return err + } + + if rows.ChunkDownloader.QueryResultFormat == arrowFormat { + for i, n := 0, len(row.ArrowRow); i < n; i++ { + dest[i] = row.ArrowRow[i] + } + } else { + for i, n := 0, len(row.RowSet); i < n; i++ { + // could move to chunk downloader so that each go routine + // can convert data + err := stringToValue(&dest[i], rows.RowType[i], row.RowSet[i]) + if err != nil { + return err + } + } + } + return err +} + +func (rows *snowflakeRows) HasNextResultSet() bool { + if len(rows.ChunkDownloader.ChunkMetas) == 0 && rows.ChunkDownloader.NextDownloader == nil { + return false // no extra chunk + } + return rows.ChunkDownloader.hasNextResultSet() +} + +func (rows *snowflakeRows) NextResultSet() error { + if len(rows.ChunkDownloader.ChunkMetas) == 0 { + if rows.ChunkDownloader.NextDownloader == nil { + return io.EOF + } + rows.ChunkDownloader = rows.ChunkDownloader.NextDownloader + rows.ChunkDownloader.start() + } + return rows.ChunkDownloader.nextResultSet() +} + +func (scd *snowflakeChunkDownloader) totalUncompressedSize() (acc int64) { + for _, c := range scd.ChunkMetas { + acc += c.UncompressedSize + } + return +} + +func (scd *snowflakeChunkDownloader) hasNextResultSet() bool { + // next result set exists if current chunk has remaining result sets or there is another downloader + return scd.CurrentChunkIndex < len(scd.ChunkMetas) || scd.NextDownloader != nil +} + +func (scd *snowflakeChunkDownloader) nextResultSet() error { + // no error at all times as the next chunk/resultset is automatically read + if scd.CurrentChunkIndex < len(scd.ChunkMetas) { + return nil + } + return io.EOF +} + +func (scd *snowflakeChunkDownloader) start() error { + scd.CurrentChunkSize = len(scd.RowSet.JSON) // cache the size + scd.CurrentIndex = -1 // initial chunks idx + scd.CurrentChunkIndex = -1 // initial chunk + + scd.CurrentChunk = make([]chunkRowType, scd.CurrentChunkSize) + populateJSONRowSet(scd.CurrentChunk, scd.RowSet.JSON) + + if scd.QueryResultFormat == arrowFormat && scd.RowSet.RowSetBase64 != "" { + // if the rowsetbase64 retrieved from the server is empty, move on to downloading chunks + var err error + firstArrowChunk := buildFirstArrowChunk(scd.RowSet.RowSetBase64) + scd.CurrentChunk, err = firstArrowChunk.decodeArrowChunk(scd.RowSet.RowType) + scd.CurrentChunkSize = firstArrowChunk.rowCount + if err != nil { + return err + } + } + + // start downloading chunks if exists + chunkMetaLen := len(scd.ChunkMetas) + if chunkMetaLen > 0 { + glog.V(2).Infof("MaxChunkDownloadWorkers: %v", MaxChunkDownloadWorkers) + glog.V(2).Infof("chunks: %v, total bytes: %d", chunkMetaLen, scd.totalUncompressedSize()) + scd.ChunksMutex = &sync.Mutex{} + scd.DoneDownloadCond = sync.NewCond(scd.ChunksMutex) + scd.Chunks = make(map[int][]chunkRowType) + scd.ChunksChan = make(chan int, chunkMetaLen) + scd.ChunksError = make(chan *chunkError, MaxChunkDownloadWorkers) + for i := 0; i < chunkMetaLen; i++ { + glog.V(2).Infof("add chunk to channel ChunksChan: %v", i+1) + scd.ChunksChan <- i + } + for i := 0; i < intMin(MaxChunkDownloadWorkers, chunkMetaLen); i++ { + scd.schedule() + } + } + return nil +} + +func (scd *snowflakeChunkDownloader) schedule() { + select { + case nextIdx := <-scd.ChunksChan: + glog.V(2).Infof("schedule chunk: %v", nextIdx+1) + go scd.FuncDownload(scd.ctx, scd, nextIdx) + default: + // no more download + glog.V(2).Info("no more download") + } +} + +func (scd *snowflakeChunkDownloader) checkErrorRetry() (err error) { + select { + case errc := <-scd.ChunksError: + if 
scd.ChunksErrorCounter < maxChunkDownloaderErrorCounter && errc.Error != context.Canceled { + // add the index to the chunks channel so that the download will be retried. + go scd.FuncDownload(scd.ctx, scd, errc.Index) + scd.ChunksErrorCounter++ + glog.V(2).Infof("chunk idx: %v, err: %v. retrying (%v/%v)...", + errc.Index, errc.Error, scd.ChunksErrorCounter, maxChunkDownloaderErrorCounter) + } else { + scd.ChunksFinalErrors = append(scd.ChunksFinalErrors, errc) + glog.V(2).Infof("chunk idx: %v, err: %v. no further retry", errc.Index, errc.Error) + return errc.Error + } + default: + glog.V(2).Info("no error is detected.") + } + return nil +} +func (scd *snowflakeChunkDownloader) Next() (chunkRowType, error) { + for { + scd.CurrentIndex++ + if scd.CurrentIndex < scd.CurrentChunkSize { + return scd.CurrentChunk[scd.CurrentIndex], nil + } + scd.CurrentChunkIndex++ // next chunk + scd.CurrentIndex = -1 // reset + if scd.CurrentChunkIndex >= len(scd.ChunkMetas) { + break + } + + scd.ChunksMutex.Lock() + if scd.CurrentChunkIndex > 1 { + scd.Chunks[scd.CurrentChunkIndex-1] = nil // detach the previously used chunk + } + + for scd.Chunks[scd.CurrentChunkIndex] == nil { + glog.V(2).Infof("waiting for chunk idx: %v/%v", + scd.CurrentChunkIndex+1, len(scd.ChunkMetas)) + + err := scd.checkErrorRetry() + if err != nil { + scd.ChunksMutex.Unlock() + return chunkRowType{}, err + } + + // wait for chunk downloader goroutine to broadcast the event, + // 1) one chunk download finishes or 2) an error occurs. + scd.DoneDownloadCond.Wait() + } + glog.V(2).Infof("ready: chunk %v", scd.CurrentChunkIndex+1) + scd.CurrentChunk = scd.Chunks[scd.CurrentChunkIndex] + scd.ChunksMutex.Unlock() + scd.CurrentChunkSize = len(scd.CurrentChunk) + + // kick off the next download + scd.schedule() + } + + glog.V(2).Infof("no more data") + if len(scd.ChunkMetas) > 0 { + close(scd.ChunksError) + close(scd.ChunksChan) + } + return chunkRowType{}, io.EOF +} + +func getChunk( + ctx context.Context, + scd *snowflakeChunkDownloader, + fullURL string, + headers map[string]string, + timeout time.Duration) ( + *http.Response, error) { + u, err := url.Parse(fullURL) + if err != nil { + return nil, err + } + return newRetryHTTP(ctx, scd.sc.rest.Client, http.NewRequest, u, headers, timeout).execute() +} + +/* largeResultSetReader is a reader that wraps the large result set with leading and tailing brackets. */ +type largeResultSetReader struct { + status int + body io.Reader +} + +func (r *largeResultSetReader) Read(p []byte) (n int, err error) { + if r.status == 0 { + p[0] = 0x5b // initial 0x5b ([) + r.status = 1 + return 1, nil + } + if r.status == 1 { + var len int + len, err = r.body.Read(p) + if err == io.EOF { + r.status = 2 + return len, nil + } + if err != nil { + return 0, err + } + return len, nil + } + if r.status == 2 { + p[0] = 0x5d // tail 0x5d (]) + r.status = 3 + return 1, nil + } + // ensure no data and EOF + return 0, io.EOF +} + +func downloadChunk(ctx context.Context, scd *snowflakeChunkDownloader, idx int) { + glog.V(2).Infof("download start chunk: %v", idx+1) + defer scd.DoneDownloadCond.Broadcast() + + if err := scd.FuncDownloadHelper(ctx, scd, idx); err != nil { + glog.V(1).Infof( + "failed to extract HTTP response body. 
URL: %v, err: %v", scd.ChunkMetas[idx].URL, err) + glog.Flush() + scd.ChunksError <- &chunkError{Index: idx, Error: err} + } else if scd.ctx.Err() == context.Canceled || scd.ctx.Err() == context.DeadlineExceeded { + scd.ChunksError <- &chunkError{Index: idx, Error: scd.ctx.Err()} + } +} + +func downloadChunkHelper(ctx context.Context, scd *snowflakeChunkDownloader, idx int) error { + headers := make(map[string]string) + if len(scd.ChunkHeader) > 0 { + glog.V(2).Info("chunk header is provided.") + for k, v := range scd.ChunkHeader { + headers[k] = v + } + } else { + headers[headerSseCAlgorithm] = headerSseCAes + headers[headerSseCKey] = scd.Qrmk + } + + resp, err := scd.FuncGet(ctx, scd, scd.ChunkMetas[idx].URL, headers, scd.sc.rest.RequestTimeout) + if err != nil { + return err + } + bufStream := bufio.NewReader(resp.Body) + defer resp.Body.Close() + glog.V(2).Infof("response returned chunk: %v, resp: %v", idx+1, resp) + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(bufStream) + if err != nil { + return err + } + glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, scd.ChunkMetas[idx].URL, b) + glog.V(1).Infof("Header: %v", resp.Header) + glog.Flush() + return &SnowflakeError{ + Number: ErrFailedToGetChunk, + SQLState: SQLStateConnectionFailure, + Message: errMsgFailedToGetChunk, + MessageArgs: []interface{}{idx}, + } + } + return decodeChunk(scd, idx, bufStream) +} + +func decodeChunk(scd *snowflakeChunkDownloader, idx int, bufStream *bufio.Reader) (err error) { + gzipMagic, err := bufStream.Peek(2) + if err != nil { + return err + } + start := time.Now() + var source io.Reader + if gzipMagic[0] == 0x1f && gzipMagic[1] == 0x8b { + // detects and uncompresses Gzip format data + bufStream0, err := gzip.NewReader(bufStream) + if err != nil { + return err + } + defer bufStream0.Close() + source = bufStream0 + } else { + source = bufStream + } + st := &largeResultSetReader{ + status: 0, + body: source, + } + var respd []chunkRowType + if scd.QueryResultFormat != arrowFormat { + var decRespd [][]*string + if !CustomJSONDecoderEnabled { + dec := json.NewDecoder(st) + for { + if err := dec.Decode(&decRespd); err == io.EOF { + break + } else if err != nil { + return err + } + } + } else { + decRespd, err = decodeLargeChunk(st, scd.ChunkMetas[idx].RowCount, scd.CellCount) + if err != nil { + return err + } + } + respd = make([]chunkRowType, len(decRespd)) + populateJSONRowSet(respd, decRespd) + } else { + ipcReader, err := ipc.NewReader(source) + if err != nil { + return err + } + arc := arrowResultChunk{ + *ipcReader, + 0, + int(scd.totalUncompressedSize()), + memory.NewGoAllocator(), + } + respd, err = arc.decodeArrowChunk(scd.RowSet.RowType) + if err != nil { + return err + } + } + glog.V(2).Infof( + "decoded %d rows w/ %d bytes in %s (chunk %v)", + scd.ChunkMetas[idx].RowCount, + scd.ChunkMetas[idx].UncompressedSize, + time.Since(start), idx+1, + ) + + scd.ChunksMutex.Lock() + defer scd.ChunksMutex.Unlock() + scd.Chunks[idx] = respd + return nil +} + +func populateJSONRowSet(dst []chunkRowType, src [][]*string) { + // populate string rowset from src to dst's chunkRowType struct's RowSet field + for i, row := range src { + dst[i].RowSet = row + } +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/sqlstate.go b/vendor/github.com/snowflakedb/gosnowflake/sqlstate.go new file mode 100644 index 000000000000..3a154759c927 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/sqlstate.go @@ -0,0 +1,18 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. 
All right reserved. + +package gosnowflake + +const ( + // SQLStateNumericValueOutOfRange is a SQL State code indicating Numeric value is out of range. + SQLStateNumericValueOutOfRange = "22003" + // SQLStateInvalidDataTimeFormat is a SQL State code indicating DataTime format is invalid. + SQLStateInvalidDataTimeFormat = "22007" + // SQLStateConnectionWasNotEstablished is a SQL State code indicating connection was not established. + SQLStateConnectionWasNotEstablished = "08001" + // SQLStateConnectionRejected is a SQL State code indicating connection was rejected. + SQLStateConnectionRejected = "08004" + // SQLStateConnectionFailure is a SQL State code indicating connection failed. + SQLStateConnectionFailure = "08006" + // SQLStateFeatureNotSupported is a SQL State code indicating the feature is not enabled. + SQLStateFeatureNotSupported = "0A000" +) diff --git a/vendor/github.com/snowflakedb/gosnowflake/statement.go b/vendor/github.com/snowflakedb/gosnowflake/statement.go new file mode 100644 index 000000000000..ee3d4ca3532e --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/statement.go @@ -0,0 +1,57 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "context" + "database/sql/driver" +) + +type paramKey string + +const ( + // MultiStatementCount controls the number of queries to execute in a single API call + MultiStatementCount paramKey = "MULTI_STATEMENT_COUNT" +) + +type snowflakeStmt struct { + sc *snowflakeConn + query string +} + +func (stmt *snowflakeStmt) Close() error { + glog.V(2).Infoln("Stmt.Close") + // noop + return nil +} + +func (stmt *snowflakeStmt) NumInput() int { + glog.V(2).Infoln("Stmt.NumInput") + // Go Snowflake doesn't know the number of binding parameters. + return -1 +} + +func (stmt *snowflakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + glog.V(2).Infoln("Stmt.ExecContext") + return stmt.sc.ExecContext(ctx, stmt.query, args) +} + +func (stmt *snowflakeStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + glog.V(2).Infoln("Stmt.QueryContext") + return stmt.sc.QueryContext(ctx, stmt.query, args) +} + +func (stmt *snowflakeStmt) Exec(args []driver.Value) (driver.Result, error) { + glog.V(2).Infoln("Stmt.Exec") + return stmt.sc.Exec(stmt.query, args) +} + +func (stmt *snowflakeStmt) Query(args []driver.Value) (driver.Rows, error) { + glog.V(2).Infoln("Stmt.Query") + return stmt.sc.Query(stmt.query, args) +} + +// WithMultiStatement returns a context that allows the user to execute the desired number of sql queries in one query +func WithMultiStatement(ctx context.Context, num int) (context.Context, error) { + return context.WithValue(ctx, MultiStatementCount, num), nil +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/transaction.go b/vendor/github.com/snowflakedb/gosnowflake/transaction.go new file mode 100644 index 000000000000..8fa09f58c3c9 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/transaction.go @@ -0,0 +1,36 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. 
+ +package gosnowflake + +import ( + "context" + "database/sql/driver" +) + +type snowflakeTx struct { + sc *snowflakeConn +} + +func (tx *snowflakeTx) Commit() (err error) { + if tx.sc == nil || tx.sc.rest == nil { + return driver.ErrBadConn + } + _, err = tx.sc.exec(context.TODO(), "COMMIT", false, false, nil) + if err != nil { + return + } + tx.sc = nil + return +} + +func (tx *snowflakeTx) Rollback() (err error) { + if tx.sc == nil || tx.sc.rest == nil { + return driver.ErrBadConn + } + _, err = tx.sc.exec(context.TODO(), "ROLLBACK", false, false, nil) + if err != nil { + return + } + tx.sc = nil + return +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/util.go b/vendor/github.com/snowflakedb/gosnowflake/util.go new file mode 100644 index 000000000000..e4948f61545b --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/util.go @@ -0,0 +1,49 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +import ( + "database/sql/driver" + "time" +) + +// integer min +func intMin(a, b int) int { + if a < b { + return a + } + return b +} + +// integer max +func intMax(a, b int) int { + if a > b { + return a + } + return b +} + +// time.Duration max +func durationMax(d1, d2 time.Duration) time.Duration { + if d1-d2 > 0 { + return d1 + } + return d2 +} + +// time.Duration min +func durationMin(d1, d2 time.Duration) time.Duration { + if d1-d2 < 0 { + return d1 + } + return d2 +} + +// toNamedValues converts a slice of driver.Value to a slice of driver.NamedValue for Go 1.8 SQL package +func toNamedValues(values []driver.Value) []driver.NamedValue { + namedValues := make([]driver.NamedValue, len(values)) + for idx, value := range values { + namedValues[idx] = driver.NamedValue{Name: "", Ordinal: idx + 1, Value: value} + } + return namedValues +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/version.go b/vendor/github.com/snowflakedb/gosnowflake/version.go new file mode 100644 index 000000000000..0d82f1b7552a --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/version.go @@ -0,0 +1,6 @@ +// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. + +package gosnowflake + +// SnowflakeGoDriverVersion is the version of Go Snowflake Driver. 
+const SnowflakeGoDriverVersion = "1.3.11" diff --git a/vendor/modules.txt b/vendor/modules.txt index 21945264064b..b3169496f659 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -101,6 +101,18 @@ github.com/aliyun/alibaba-cloud-sdk-go/services/ram github.com/aliyun/alibaba-cloud-sdk-go/services/sts # github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 github.com/aliyun/aliyun-oss-go-sdk/oss +# github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 +github.com/apache/arrow/go/arrow +github.com/apache/arrow/go/arrow/array +github.com/apache/arrow/go/arrow/arrio +github.com/apache/arrow/go/arrow/bitutil +github.com/apache/arrow/go/arrow/decimal128 +github.com/apache/arrow/go/arrow/float16 +github.com/apache/arrow/go/arrow/internal/cpu +github.com/apache/arrow/go/arrow/internal/debug +github.com/apache/arrow/go/arrow/internal/flatbuf +github.com/apache/arrow/go/arrow/ipc +github.com/apache/arrow/go/arrow/memory # github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 github.com/apple/foundationdb/bindings/go/src/fdb github.com/apple/foundationdb/bindings/go/src/fdb/directory @@ -240,6 +252,8 @@ github.com/denisenkom/go-mssqldb/internal/querytext github.com/denverdino/aliyungo/common github.com/denverdino/aliyungo/ecs github.com/denverdino/aliyungo/util +# github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 +github.com/dgrijalva/jwt-go/v4 # github.com/digitalocean/godo v1.7.5 github.com/digitalocean/godo # github.com/dimchansky/utfbom v1.1.0 @@ -352,6 +366,8 @@ github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.1 github.com/golang/snappy +# github.com/google/flatbuffers v1.11.0 +github.com/google/flatbuffers/go # github.com/google/go-cmp v0.5.2 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff @@ -529,6 +545,8 @@ github.com/hashicorp/vault-plugin-database-couchbase github.com/hashicorp/vault-plugin-database-elasticsearch # github.com/hashicorp/vault-plugin-database-mongodbatlas v0.2.1 github.com/hashicorp/vault-plugin-database-mongodbatlas +# github.com/hashicorp/vault-plugin-database-snowflake v0.1.0 +github.com/hashicorp/vault-plugin-database-snowflake # github.com/hashicorp/vault-plugin-mock v0.16.1 github.com/hashicorp/vault-plugin-mock # github.com/hashicorp/vault-plugin-secrets-ad v0.8.0 @@ -861,6 +879,8 @@ github.com/petermattis/goid # github.com/pierrec/lz4 v2.5.2+incompatible github.com/pierrec/lz4 github.com/pierrec/lz4/internal/xxh32 +# github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 +github.com/pkg/browser # github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 @@ -918,6 +938,10 @@ github.com/shirou/gopsutil/net github.com/shirou/gopsutil/process # github.com/sirupsen/logrus v1.6.0 github.com/sirupsen/logrus +# github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce +github.com/snowflakedb/glog +# github.com/snowflakedb/gosnowflake v1.3.11 +github.com/snowflakedb/gosnowflake # github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d github.com/softlayer/softlayer-go/config github.com/softlayer/softlayer-go/datatypes
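The vendored driver above is consumed through Go's standard database/sql interface. What follows is a minimal usage sketch, not part of the diff: it assumes the driver's self-registration under the name "snowflake" (done in driver.go, outside this excerpt), a hypothetical account, credentials, and warehouse in the DSN, and that multi-statement execution is permitted for the session. It exercises WithMultiStatement from statement.go and the multi-result-set iteration implemented in rows.go.

package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	sf "github.com/snowflakedb/gosnowflake"
)

func main() {
	// Hypothetical DSN of the form user:password@account/database/schema?warehouse=wh
	db, err := sql.Open("snowflake", "user:password@my_account/my_db/public?warehouse=my_wh")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// WithMultiStatement (statement.go) stores MULTI_STATEMENT_COUNT in the
	// context so both statements are sent to the server in one API call.
	ctx, err := sf.WithMultiStatement(context.Background(), 2)
	if err != nil {
		log.Fatal(err)
	}
	rows, err := db.QueryContext(ctx, "SELECT 1; SELECT 2")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// rows.go implements HasNextResultSet/NextResultSet, so each
	// statement's result set is drained in turn.
	for {
		for rows.Next() {
			var v int
			if err := rows.Scan(&v); err != nil {
				log.Fatal(err)
			}
			fmt.Println(v)
		}
		if !rows.NextResultSet() {
			break
		}
	}
}

Retries and request deduplication happen transparently underneath this: each attempt made by retryHTTP.execute (retry.go) re-stamps the request_guid, appends a retryCounter to query requests, and backs off using the decorrelated jitter algorithm in waitAlgo.decorr.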