From 13bf68cbd4bb862d77e6c3a7574487567101a121 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 8 Jan 2020 11:21:56 +0100 Subject: [PATCH 1/5] Added ingestion rate global limit support Signed-off-by: Marco Pracucci --- go.mod | 1 - go.sum | 4 + pkg/distributor/distributor.go | 122 +- pkg/distributor/distributor_test.go | 159 ++- pkg/distributor/ingestion_rate_strategy.go | 57 + .../ingestion_rate_strategy_test.go | 87 ++ pkg/util/validation/limits.go | 40 +- .../cortex/pkg/distributor/billing.go | 37 + .../cortex/pkg/distributor/distributor.go | 654 +++++++++ .../pkg/distributor/distributor_ring.go | 87 ++ .../cortex/pkg/distributor/ha_tracker.go | 286 ++++ .../cortex/pkg/distributor/ha_tracker.pb.go | 446 ++++++ .../cortex/pkg/distributor/ha_tracker.proto | 13 + .../cortex/pkg/distributor/ha_tracker_http.go | 99 ++ .../cortex/pkg/distributor/http_admin.go | 98 ++ .../cortex/pkg/distributor/http_server.go | 108 ++ .../distributor/ingestion_rate_strategy.go | 73 + .../cortex/pkg/distributor/query.go | 175 +++ .../cortex/pkg/util/limiter/rate_limiter.go | 122 ++ .../cortex/pkg/util/test/poll.go | 26 + .../fluent/fluent-logger-golang/LICENSE | 202 +++ .../fluent-logger-golang/fluent/fluent.go | 309 ++++ .../fluent-logger-golang/fluent/proto.go | 24 + .../fluent-logger-golang/fluent/proto_gen.go | 372 +++++ .../fluent-logger-golang/fluent/version.go | 3 + vendor/github.com/philhofer/fwd/LICENSE.md | 7 + vendor/github.com/philhofer/fwd/README.md | 315 ++++ vendor/github.com/philhofer/fwd/reader.go | 379 +++++ vendor/github.com/philhofer/fwd/writer.go | 224 +++ .../philhofer/fwd/writer_appengine.go | 5 + .../github.com/philhofer/fwd/writer_unsafe.go | 18 + vendor/github.com/tinylib/msgp/LICENSE | 8 + .../tinylib/msgp/msgp/advise_linux.go | 24 + .../tinylib/msgp/msgp/advise_other.go | 17 + .../github.com/tinylib/msgp/msgp/appengine.go | 15 + .../github.com/tinylib/msgp/msgp/circular.go | 39 + vendor/github.com/tinylib/msgp/msgp/defs.go | 142 ++ vendor/github.com/tinylib/msgp/msgp/edit.go | 241 ++++ vendor/github.com/tinylib/msgp/msgp/elsize.go | 99 ++ vendor/github.com/tinylib/msgp/msgp/errors.go | 142 ++ .../github.com/tinylib/msgp/msgp/extension.go | 548 +++++++ vendor/github.com/tinylib/msgp/msgp/file.go | 92 ++ .../github.com/tinylib/msgp/msgp/file_port.go | 47 + .../github.com/tinylib/msgp/msgp/integers.go | 174 +++ vendor/github.com/tinylib/msgp/msgp/json.go | 542 +++++++ .../tinylib/msgp/msgp/json_bytes.go | 363 +++++ vendor/github.com/tinylib/msgp/msgp/number.go | 267 ++++ vendor/github.com/tinylib/msgp/msgp/read.go | 1265 +++++++++++++++++ .../tinylib/msgp/msgp/read_bytes.go | 1089 ++++++++++++++ vendor/github.com/tinylib/msgp/msgp/size.go | 38 + vendor/github.com/tinylib/msgp/msgp/unsafe.go | 40 + vendor/github.com/tinylib/msgp/msgp/write.go | 845 +++++++++++ .../tinylib/msgp/msgp/write_bytes.go | 411 ++++++ .../weaveworks/billing-client/.gitignore | 23 + .../weaveworks/billing-client/Gopkg.lock | 105 ++ .../weaveworks/billing-client/Gopkg.toml | 26 + .../weaveworks/billing-client/README.md | 41 + .../weaveworks/billing-client/client.go | 207 +++ .../weaveworks/billing-client/config.go | 20 + .../weaveworks/billing-client/event.go | 45 + vendor/modules.txt | 11 + 61 files changed, 11376 insertions(+), 102 deletions(-) create mode 100644 pkg/distributor/ingestion_rate_strategy.go create mode 100644 pkg/distributor/ingestion_rate_strategy_test.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/billing.go create mode 100644 
vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.pb.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.proto create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker_http.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/http_admin.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/http_server.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/ingestion_rate_strategy.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/distributor/query.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/test/poll.go create mode 100644 vendor/github.com/fluent/fluent-logger-golang/LICENSE create mode 100644 vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go create mode 100644 vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go create mode 100644 vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go create mode 100644 vendor/github.com/fluent/fluent-logger-golang/fluent/version.go create mode 100644 vendor/github.com/philhofer/fwd/LICENSE.md create mode 100644 vendor/github.com/philhofer/fwd/README.md create mode 100644 vendor/github.com/philhofer/fwd/reader.go create mode 100644 vendor/github.com/philhofer/fwd/writer.go create mode 100644 vendor/github.com/philhofer/fwd/writer_appengine.go create mode 100644 vendor/github.com/philhofer/fwd/writer_unsafe.go create mode 100644 vendor/github.com/tinylib/msgp/LICENSE create mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_linux.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_other.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/appengine.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/circular.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/defs.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/edit.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/elsize.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/errors.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/extension.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file_port.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/integers.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/number.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/size.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/unsafe.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write_bytes.go create mode 100644 vendor/github.com/weaveworks/billing-client/.gitignore create mode 100644 vendor/github.com/weaveworks/billing-client/Gopkg.lock create mode 100644 vendor/github.com/weaveworks/billing-client/Gopkg.toml create mode 100644 
vendor/github.com/weaveworks/billing-client/README.md create mode 100644 vendor/github.com/weaveworks/billing-client/client.go create mode 100644 vendor/github.com/weaveworks/billing-client/config.go create mode 100644 vendor/github.com/weaveworks/billing-client/event.go diff --git a/go.mod b/go.mod index e2d2c90605aa..c5988f626dd2 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,6 @@ require ( golang.org/x/net v0.0.0-20190923162816-aa69164e4478 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect golang.org/x/sys v0.0.0-20191218084908-4a24b4065292 // indirect - golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/tools v0.0.0-20190925134113-a044388aa56f // indirect google.golang.org/appengine v1.6.3 // indirect google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 // indirect diff --git a/go.sum b/go.sum index 992dc1d45688..ec0de8bd9be1 100644 --- a/go.sum +++ b/go.sum @@ -197,6 +197,7 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/structtag v1.0.0/go.mod h1:IKitwq45uXL/yqi5mYghiD3w9H6eTOvI9vnk8tXMphA= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+ptC4kTFPEN9Ej2latTq3bZJ5HO/OwPXYMs= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ= +github.com/fluent/fluent-logger-golang v1.2.1 h1:CMA+mw2zMiOGEOarZtaqM3GBWT1IVLNncNi0nKELtmU= github.com/fluent/fluent-logger-golang v1.2.1/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= @@ -563,6 +564,7 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8 h1:jkUFVqrKRttbdDqkTrvOmHxfqIsJK0Oe2WGi1ACAE+M= github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible h1:5isCJDRADbeSlWx6KVXAYwrcihyCGVXr7GNCdLEVDr8= @@ -666,6 +668,7 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/thanos-io/thanos v0.8.1/go.mod h1:qQDi/6tgypn96+VzSumlxfJIgFX2y3ablfhHHLZ05cg= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d h1:Ninez2SUm08xpmnw7kVxCeOc3DahF6IuMuRMCdM4wTQ= github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= @@ -687,6 +690,7 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7 
h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 h1:qi+YkNiB7T3Ikw1DoDIFhdAPbDU7fUPDsKrUoZdupnQ= github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1/go.mod h1:7gGdEUJaCrSlWi/mjd68CZv0sfqektYPDcro9cE+M9k= github.com/weaveworks/common v0.0.0-20190822150010-afb9996716e4 h1:O8BmyjqQoByXjAj6XaTfcxxqSIK6DYLmOSiYQPL9yJg= github.com/weaveworks/common v0.0.0-20190822150010-afb9996716e4/go.mod h1:pSm+0KR57BG3pvGoJWFXJSAC7+sEPewcvdt5StevL3A= diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 2ee9584bcc12..31f4d086fd27 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -4,13 +4,14 @@ import ( "context" "flag" "net/http" - "sync" "sync/atomic" "time" + cortex_distributor "github.com/cortexproject/cortex/pkg/distributor" cortex_client "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" cortex_util "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/limiter" cortex_validation "github.com/cortexproject/cortex/pkg/util/validation" "github.com/go-kit/kit/log/level" @@ -19,7 +20,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" - "golang.org/x/time/rate" "google.golang.org/grpc/health/grpc_health_v1" "github.com/grafana/loki/pkg/ingester/client" @@ -30,7 +30,6 @@ import ( const ( metricName = "logs" - bytesInMB = 1048576 ) var readinessProbeSuccess = []byte("Ready") @@ -60,32 +59,36 @@ var ( // Config for a Distributor. type Config struct { - // For testing. - factory func(addr string) (grpc_health_v1.HealthClient, error) + // Distributors ring + DistributorRing cortex_distributor.RingConfig `yaml:"ring,omitempty"` - LimiterReloadPeriod time.Duration `yaml:"limiter_reload_period"` + // For testing. + factory func(addr string) (grpc_health_v1.HealthClient, error) `yaml:"-"` } // RegisterFlags registers the flags. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.DurationVar(&cfg.LimiterReloadPeriod, "distributor.limiter-reload-period", 5*time.Minute, "Period at which to reload user ingestion limits.") + cfg.DistributorRing.RegisterFlags(f) } // Distributor coordinates replicates and distribution of log streams. type Distributor struct { - cfg Config - clientCfg client.Config - ring ring.ReadRing - overrides *validation.Overrides - pool *cortex_client.Pool - - ingestLimitersMtx sync.RWMutex - ingestLimiters map[string]*rate.Limiter - quit chan struct{} + cfg Config + clientCfg client.Config + ingestersRing ring.ReadRing + overrides *validation.Overrides + pool *cortex_client.Pool + + // The global rate limiter requires a distributors ring to count + // the number of healthy instances + distributorsRing *ring.Lifecycler + + // Per-user rate limiter. + ingestionRateLimiter *limiter.RateLimiter } // New a distributor creates. 
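+// New creates a Distributor, wiring up the configured ingestion rate
+// strategy (and the distributors ring when the global strategy is used).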
-func New(cfg Config, clientCfg client.Config, ring ring.ReadRing, overrides *validation.Overrides) (*Distributor, error) { +func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overrides *validation.Overrides) (*Distributor, error) { factory := cfg.factory if factory == nil { factory = func(addr string) (grpc_health_v1.HealthClient, error) { @@ -93,44 +96,43 @@ func New(cfg Config, clientCfg client.Config, ring ring.ReadRing, overrides *val } } - d := Distributor{ - cfg: cfg, - clientCfg: clientCfg, - ring: ring, - overrides: overrides, - pool: cortex_client.NewPool(clientCfg.PoolConfig, ring, factory, cortex_util.Logger), - ingestLimiters: map[string]*rate.Limiter{}, - quit: make(chan struct{}), - } + // Create the configured ingestion rate limit strategy (local or global). In case + // it's an internal dependency and can't join the distributors ring, we skip rate + // limiting. + var ingestionRateStrategy limiter.RateLimiterStrategy + var distributorsRing *ring.Lifecycler - go d.loop() + if overrides.IngestionRateStrategy() == validation.GlobalIngestionRateStrategy { + var err error + distributorsRing, err = ring.NewLifecycler(cfg.DistributorRing.ToLifecyclerConfig(), nil, "distributor", ring.DistributorRingKey) + if err != nil { + return nil, err + } - return &d, nil -} + distributorsRing.Start() -func (d *Distributor) loop() { - if d.cfg.LimiterReloadPeriod == 0 { - return + ingestionRateStrategy = newGlobalIngestionRateStrategy(overrides, distributorsRing) + } else { + ingestionRateStrategy = newLocalIngestionRateStrategy(overrides) } - ticker := time.NewTicker(d.cfg.LimiterReloadPeriod) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - d.ingestLimitersMtx.Lock() - d.ingestLimiters = make(map[string]*rate.Limiter, len(d.ingestLimiters)) - d.ingestLimitersMtx.Unlock() - - case <-d.quit: - return - } + d := Distributor{ + cfg: cfg, + clientCfg: clientCfg, + ingestersRing: ingestersRing, + distributorsRing: distributorsRing, + overrides: overrides, + pool: cortex_client.NewPool(clientCfg.PoolConfig, ingestersRing, factory, cortex_util.Logger), + ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), } + + return &d, nil } func (d *Distributor) Stop() { - close(d.quit) + if d.distributorsRing != nil { + d.distributorsRing.Shutdown() + } } // TODO taken from Cortex, see if we can refactor out an usable interface. @@ -153,7 +155,7 @@ type pushTracker struct { // ReadinessHandler is used to indicate to k8s when the distributor is ready. // Returns 200 when the distributor is ready, 500 otherwise. func (d *Distributor) ReadinessHandler(w http.ResponseWriter, r *http.Request) { - _, err := d.ring.GetAll() + _, err := d.ingestersRing.GetAll() if err != nil { http.Error(w, "Not ready: "+err.Error(), http.StatusInternalServerError) return @@ -226,13 +228,13 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log return &logproto.PushResponse{}, validationErr } - limiter := d.getOrCreateIngestLimiter(userID) - if !limiter.AllowN(time.Now(), validatedSamplesSize) { + now := time.Now() + if !d.ingestionRateLimiter.AllowN(now, userID, validatedSamplesSize) { // Return a 4xx here to have the client discard the data and not retry. If a client // is sending too much data consistently we will unlikely ever catch up otherwise. 
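+		// Illustrative numbers (assumed, not part of this change): with the default
+		// local limits of 4 MB/s and a 6 MB burst, a single 7 MB push always fails
+		// AllowN above, even with a full token bucket, because one request can never
+		// exceed the burst size.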
validation.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamplesCount)) validation.DiscardedBytes.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamplesSize)) - return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%d) exceeded while adding %d lines", int(limiter.Limit()), validatedSamplesCount) + return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%d bytes) exceeded while adding %d lines for a total size of %d bytes", int(d.ingestionRateLimiter.Limit(now, userID)), validatedSamplesCount, validatedSamplesSize) } const maxExpectedReplicationSet = 5 // typical replication factor 3 plus one for inactive plus one for luck @@ -241,7 +243,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log samplesByIngester := map[string][]*streamTracker{} ingesterDescs := map[string]ring.IngesterDesc{} for i, key := range keys { - replicationSet, err := d.ring.Get(key, ring.Write, descs[:0]) + replicationSet, err := d.ingestersRing.Get(key, ring.Write, descs[:0]) if err != nil { return nil, err } @@ -349,21 +351,3 @@ func (d *Distributor) sendSamplesErr(ctx context.Context, ingester ring.Ingester func (*Distributor) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil } - -func (d *Distributor) getOrCreateIngestLimiter(userID string) *rate.Limiter { - d.ingestLimitersMtx.RLock() - limiter, ok := d.ingestLimiters[userID] - d.ingestLimitersMtx.RUnlock() - - if ok { - return limiter - } - - limiter = rate.NewLimiter(rate.Limit(int64(d.overrides.IngestionRate(userID)*bytesInMB)), int(d.overrides.IngestionBurstSize(userID)*bytesInMB)) - - d.ingestLimitersMtx.Lock() - d.ingestLimiters[userID] = limiter - d.ingestLimitersMtx.Unlock() - - return limiter -} diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index cea97b4efa31..c6ff0f8f394a 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -3,12 +3,18 @@ package distributor import ( "context" "fmt" + "math/rand" "net/http" + "strconv" + "strings" "testing" "time" "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/ring/kv/consul" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/test" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" @@ -24,8 +30,7 @@ import ( ) const ( - numIngesters = 5 - ingestionRateLimit = 0.000096 // 100 Bytes/s limit + numIngesters = 5 ) var ( @@ -34,24 +39,32 @@ var ( ) func TestDistributor(t *testing.T) { + ingestionRateLimit := 0.000096 // 100 Bytes/s limit + for i, tc := range []struct { - samples int + lines int expectedResponse *logproto.PushResponse expectedError error }{ { - samples: 10, + lines: 10, expectedResponse: success, }, { - samples: 100, - expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (100) exceeded while adding 100 lines"), + lines: 100, + expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (100 bytes) exceeded while adding 100 lines for a total size of 1000 bytes"), }, } { - t.Run(fmt.Sprintf("[%d](samples=%v)", i, tc.samples), func(t *testing.T) { - d := prepare(t) + t.Run(fmt.Sprintf("[%d](samples=%v)", i, tc.lines), func(t 
*testing.T) { + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.EnforceMetricName = false + limits.IngestionRateMB = ingestionRateLimit + limits.IngestionBurstSizeMB = ingestionRateLimit + + d := prepare(t, limits, nil) - request := makeWriteRequest(tc.samples) + request := makeWriteRequest(tc.lines, 10) response, err := d.Push(ctx, request) assert.Equal(t, tc.expectedResponse, response) assert.Equal(t, tc.expectedError, err) @@ -59,45 +72,143 @@ func TestDistributor(t *testing.T) { } } -func prepare(t *testing.T) *Distributor { +func TestDistributor_PushIngestionRateLimiter(t *testing.T) { + type testPush struct { + bytes int + expectedError error + } + + tests := map[string]struct { + distributors int + ingestionRateStrategy string + ingestionRateMB float64 + ingestionBurstSizeMB float64 + pushes []testPush + }{ + "local strategy: limit should be set to each distributor": { + distributors: 2, + ingestionRateStrategy: validation.LocalIngestionRateStrategy, + ingestionRateMB: 10 * (1.0 / float64(bytesInMB)), + ingestionBurstSizeMB: 10 * (1.0 / float64(bytesInMB)), + pushes: []testPush{ + {bytes: 5, expectedError: nil}, + {bytes: 6, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (10 bytes) exceeded while adding 1 lines for a total size of 6 bytes")}, + {bytes: 5, expectedError: nil}, + {bytes: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (10 bytes) exceeded while adding 1 lines for a total size of 1 bytes")}, + }, + }, + "global strategy: limit should be evenly shared across distributors": { + distributors: 2, + ingestionRateStrategy: validation.GlobalIngestionRateStrategy, + ingestionRateMB: 10 * (1.0 / float64(bytesInMB)), + ingestionBurstSizeMB: 5 * (1.0 / float64(bytesInMB)), + pushes: []testPush{ + {bytes: 3, expectedError: nil}, + {bytes: 3, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5 bytes) exceeded while adding 1 lines for a total size of 3 bytes")}, + {bytes: 2, expectedError: nil}, + {bytes: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5 bytes) exceeded while adding 1 lines for a total size of 1 bytes")}, + }, + }, + "global strategy: burst should set to each distributor": { + distributors: 2, + ingestionRateStrategy: validation.GlobalIngestionRateStrategy, + ingestionRateMB: 10 * (1.0 / float64(bytesInMB)), + ingestionBurstSizeMB: 20 * (1.0 / float64(bytesInMB)), + pushes: []testPush{ + {bytes: 15, expectedError: nil}, + {bytes: 6, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5 bytes) exceeded while adding 1 lines for a total size of 6 bytes")}, + {bytes: 5, expectedError: nil}, + {bytes: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5 bytes) exceeded while adding 1 lines for a total size of 1 bytes")}, + }, + }, + } + + for testName, testData := range tests { + testData := testData + + t.Run(testName, func(t *testing.T) { + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.EnforceMetricName = false + limits.IngestionRateStrategy = testData.ingestionRateStrategy + limits.IngestionRateMB = testData.ingestionRateMB + limits.IngestionBurstSizeMB = testData.ingestionBurstSizeMB + + // Init a shared KVStore + kvStore := consul.NewInMemoryClient(ring.GetCodec()) + + // Start all expected distributors + distributors := make([]*Distributor, testData.distributors) + for i := 0; i < testData.distributors; i++ { + 
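+				// All distributors share the in-memory KV store created above, so they
+				// join the same ring and the global strategy counts every healthy instance.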
distributors[i] = prepare(t, limits, kvStore) + defer distributors[i].Stop() + } + + // If the distributors ring is setup, wait until the first distributor + // updates to the expected size + if distributors[0].distributorsRing != nil { + test.Poll(t, time.Second, testData.distributors, func() interface{} { + return distributors[0].distributorsRing.HealthyInstancesCount() + }) + } + + // Push samples in multiple requests to the first distributor + for _, push := range testData.pushes { + request := makeWriteRequest(1, push.bytes) + response, err := distributors[0].Push(ctx, request) + + if push.expectedError == nil { + assert.Equal(t, success, response) + assert.Nil(t, err) + } else { + assert.Nil(t, response) + assert.Equal(t, push.expectedError, err) + } + } + }) + } +} + +func prepare(t *testing.T, limits *validation.Limits, kvStore kv.Client) *Distributor { var ( distributorConfig Config - defaultLimits validation.Limits clientConfig client.Config ) - flagext.DefaultValues(&distributorConfig, &defaultLimits, &clientConfig) - defaultLimits.EnforceMetricName = false - defaultLimits.IngestionRate = ingestionRateLimit - defaultLimits.IngestionBurstSize = ingestionRateLimit + flagext.DefaultValues(&distributorConfig, &clientConfig) - limits, err := validation.NewOverrides(defaultLimits) + overrides, err := validation.NewOverrides(*limits) require.NoError(t, err) + // Mock the ingesters ring ingesters := map[string]*mockIngester{} for i := 0; i < numIngesters; i++ { ingesters[fmt.Sprintf("ingester%d", i)] = &mockIngester{} } - r := &mockRing{ + ingestersRing := &mockRing{ replicationFactor: 3, } for addr := range ingesters { - r.ingesters = append(r.ingesters, ring.IngesterDesc{ + ingestersRing.ingesters = append(ingestersRing.ingesters, ring.IngesterDesc{ Addr: addr, }) } + distributorConfig.DistributorRing.HeartbeatPeriod = 100 * time.Millisecond + distributorConfig.DistributorRing.InstanceID = strconv.Itoa(rand.Int()) + distributorConfig.DistributorRing.KVStore.Mock = kvStore + distributorConfig.DistributorRing.InstanceInterfaceNames = []string{"eth0", "en0", "lo0"} distributorConfig.factory = func(addr string) (grpc_health_v1.HealthClient, error) { return ingesters[addr], nil } - d, err := New(distributorConfig, clientConfig, r, limits) + d, err := New(distributorConfig, clientConfig, ingestersRing, overrides) require.NoError(t, err) return d } -func makeWriteRequest(samples int) *logproto.PushRequest { +func makeWriteRequest(lines int, size int) *logproto.PushRequest { req := logproto.PushRequest{ Streams: []*logproto.Stream{ { @@ -106,10 +217,14 @@ func makeWriteRequest(samples int) *logproto.PushRequest { }, } - for i := 0; i < samples; i++ { + for i := 0; i < lines; i++ { + // Construct the log line, honoring the input size + line := strconv.Itoa(i) + strings.Repeat(" ", size) + line = line[:size] + req.Streams[0].Entries = append(req.Streams[0].Entries, logproto.Entry{ Timestamp: time.Unix(0, 0), - Line: fmt.Sprintf("line %d", i), + Line: line, }) } return &req diff --git a/pkg/distributor/ingestion_rate_strategy.go b/pkg/distributor/ingestion_rate_strategy.go new file mode 100644 index 000000000000..e41805797083 --- /dev/null +++ b/pkg/distributor/ingestion_rate_strategy.go @@ -0,0 +1,57 @@ +package distributor + +import ( + "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/grafana/loki/pkg/util/validation" +) + +// ReadLifecycler represents the read interface to the lifecycler. 
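+// It is satisfied by ring.Lifecycler; the global strategy divides the
+// per-tenant limit by HealthyInstancesCount(), e.g. a 10 MB/s tenant limit
+// shared across 2 healthy distributors is enforced as 5 MB/s on each.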
+type ReadLifecycler interface { + HealthyInstancesCount() int +} + +type localStrategy struct { + limits *validation.Overrides +} + +func newLocalIngestionRateStrategy(limits *validation.Overrides) limiter.RateLimiterStrategy { + return &localStrategy{ + limits: limits, + } +} + +func (s *localStrategy) Limit(userID string) float64 { + return s.limits.IngestionRateBytes(userID) +} + +func (s *localStrategy) Burst(userID string) int { + return s.limits.IngestionBurstSizeBytes(userID) +} + +type globalStrategy struct { + limits *validation.Overrides + ring ReadLifecycler +} + +func newGlobalIngestionRateStrategy(limits *validation.Overrides, ring ReadLifecycler) limiter.RateLimiterStrategy { + return &globalStrategy{ + limits: limits, + ring: ring, + } +} + +func (s *globalStrategy) Limit(userID string) float64 { + numDistributors := s.ring.HealthyInstancesCount() + + if numDistributors == 0 { + return s.limits.IngestionRateBytes(userID) + } + + return s.limits.IngestionRateBytes(userID) / float64(numDistributors) +} + +func (s *globalStrategy) Burst(userID string) int { + // The meaning of burst doesn't change for the global strategy, in order + // to keep it easier to understand for users / operators. + return s.limits.IngestionBurstSizeBytes(userID) +} diff --git a/pkg/distributor/ingestion_rate_strategy_test.go b/pkg/distributor/ingestion_rate_strategy_test.go new file mode 100644 index 000000000000..1be590fd5953 --- /dev/null +++ b/pkg/distributor/ingestion_rate_strategy_test.go @@ -0,0 +1,87 @@ +package distributor + +import ( + "testing" + + "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/grafana/loki/pkg/util/validation" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +const ( + bytesInMB = 1048576 +) + +func TestIngestionRateStrategy(t *testing.T) { + tests := map[string]struct { + limits validation.Limits + ring ReadLifecycler + expectedLimit float64 + expectedBurst int + }{ + "local rate limiter should just return configured limits": { + limits: validation.Limits{ + IngestionRateStrategy: validation.LocalIngestionRateStrategy, + IngestionRateMB: 1.0, + IngestionBurstSizeMB: 2.0, + }, + ring: nil, + expectedLimit: 1.0 * float64(bytesInMB), + expectedBurst: int(2.0 * float64(bytesInMB)), + }, + "global rate limiter should share the limit across the number of distributors": { + limits: validation.Limits{ + IngestionRateStrategy: validation.GlobalIngestionRateStrategy, + IngestionRateMB: 1.0, + IngestionBurstSizeMB: 2.0, + }, + ring: func() ReadLifecycler { + ring := newReadLifecyclerMock() + ring.On("HealthyInstancesCount").Return(2) + return ring + }(), + expectedLimit: 0.5 * float64(bytesInMB), + expectedBurst: int(2.0 * float64(bytesInMB)), + }, + } + + for testName, testData := range tests { + testData := testData + + t.Run(testName, func(t *testing.T) { + var strategy limiter.RateLimiterStrategy + + // Init limits overrides + overrides, err := validation.NewOverrides(testData.limits) + require.NoError(t, err) + + // Instance the strategy + switch testData.limits.IngestionRateStrategy { + case validation.LocalIngestionRateStrategy: + strategy = newLocalIngestionRateStrategy(overrides) + case validation.GlobalIngestionRateStrategy: + strategy = newGlobalIngestionRateStrategy(overrides, testData.ring) + default: + require.Fail(t, "Unknown strategy") + } + + assert.Equal(t, strategy.Limit("test"), testData.expectedLimit) + assert.Equal(t, strategy.Burst("test"), testData.expectedBurst) + }) + 
} +} + +type readLifecyclerMock struct { + mock.Mock +} + +func newReadLifecyclerMock() *readLifecyclerMock { + return &readLifecyclerMock{} +} + +func (m *readLifecyclerMock) HealthyInstancesCount() int { + args := m.Called() + return args.Int(0) +} diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 62dae688a524..fea25e3a184c 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -10,12 +10,23 @@ import ( "gopkg.in/yaml.v2" ) +const ( + // Local ingestion rate strategy + LocalIngestionRateStrategy = "local" + + // Global ingestion rate strategy + GlobalIngestionRateStrategy = "global" + + bytesInMB = 1048576 +) + // Limits describe all the limits for users; can be used to describe global default // limits via flags, or per-user limits via yaml config. type Limits struct { // Distributor enforced limits. - IngestionRate float64 `yaml:"ingestion_rate_mb"` - IngestionBurstSize float64 `yaml:"ingestion_burst_size_mb"` + IngestionRateStrategy string `yaml:"ingestion_rate_strategy"` + IngestionRateMB float64 `yaml:"ingestion_rate_mb"` + IngestionBurstSizeMB float64 `yaml:"ingestion_burst_size_mb"` MaxLabelNameLength int `yaml:"max_label_name_length"` MaxLabelValueLength int `yaml:"max_label_value_length"` MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series"` @@ -41,8 +52,9 @@ type Limits struct { // RegisterFlags adds the flags required to config this to the given FlagSet func (l *Limits) RegisterFlags(f *flag.FlagSet) { - f.Float64Var(&l.IngestionRate, "distributor.ingestion-rate-limit-mb", 4, "Per-user ingestion rate limit in sample size per second. Units in MB.") - f.Float64Var(&l.IngestionBurstSize, "distributor.ingestion-burst-size-mb", 6, "Per-user allowed ingestion burst size (in sample size). Units in MB. Warning, very high limits will be reset every -distributor.limiter-reload-period.") + f.StringVar(&l.IngestionRateStrategy, "distributor.ingestion-rate-limit-strategy", "local", "Whether the ingestion rate limit should be applied individually to each distributor instance (local), or evenly shared across the cluster (global).") + f.Float64Var(&l.IngestionRateMB, "distributor.ingestion-rate-limit-mb", 4, "Per-user ingestion rate limit in sample size per second. Units in MB.") + f.Float64Var(&l.IngestionBurstSizeMB, "distributor.ingestion-burst-size-mb", 6, "Per-user allowed ingestion burst size (in sample size). Units in MB.") f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names") f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name") f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.") @@ -117,14 +129,22 @@ func (o *Overrides) Stop() { o.overridesManager.Stop() } -// IngestionRate returns the limit on ingester rate (samples per second). -func (o *Overrides) IngestionRate(userID string) float64 { - return o.overridesManager.GetLimits(userID).(*Limits).IngestionRate +// IngestionRateStrategy returns whether the ingestion rate limit should be individually applied +// to each distributor instance (local) or evenly shared across the cluster (global). +func (o *Overrides) IngestionRateStrategy() string { + // The ingestion rate strategy can't be overridden on a per-tenant basis, + // so here we just pick the value for a not-existing user ID (empty string). 
+ return o.overridesManager.GetLimits("").(*Limits).IngestionRateStrategy +} + +// IngestionRateBytes returns the limit on ingester rate (MBs per second). +func (o *Overrides) IngestionRateBytes(userID string) float64 { + return o.overridesManager.GetLimits(userID).(*Limits).IngestionRateMB * bytesInMB } -// IngestionBurstSize returns the burst size for ingestion rate. -func (o *Overrides) IngestionBurstSize(userID string) float64 { - return o.overridesManager.GetLimits(userID).(*Limits).IngestionBurstSize +// IngestionBurstSizeBytes returns the burst size for ingestion rate. +func (o *Overrides) IngestionBurstSizeBytes(userID string) int { + return int(o.overridesManager.GetLimits(userID).(*Limits).IngestionBurstSizeMB * bytesInMB) } // MaxLabelNameLength returns maximum length a label name can be. diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/billing.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/billing.go new file mode 100644 index 000000000000..2c31581cdbcf --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/billing.go @@ -0,0 +1,37 @@ +package distributor + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "time" + + billing "github.com/weaveworks/billing-client" + "github.com/weaveworks/common/user" +) + +func init() { + billing.MustRegisterMetrics() +} + +func (d *Distributor) emitBillingRecord(ctx context.Context, buf []byte, samples int64) error { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return err + } + + now := time.Now().UTC() + hasher := sha256.New() + hasher.Write(buf) + hash := "sha256:" + base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + amounts := billing.Amounts{ + billing.Samples: samples, + } + return d.billingClient.AddAmounts( + hash, + userID, + now, + amounts, + nil, + ) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go new file mode 100644 index 000000000000..efba1fec25ed --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -0,0 +1,654 @@ +package distributor + +import ( + "context" + "flag" + "fmt" + "net/http" + "sort" + "strings" + "time" + + "google.golang.org/grpc/health/grpc_health_v1" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + + "github.com/cortexproject/cortex/pkg/ingester/client" + ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/prom1/storage/metric" + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/extract" + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/cortexproject/cortex/pkg/util/validation" + billing "github.com/weaveworks/billing-client" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/instrument" + "github.com/weaveworks/common/user" +) + +var ( + queryDuration = instrument.NewHistogramCollector(promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "distributor_query_duration_seconds", + Help: "Time spent executing expression queries.", + Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30}, + }, []string{"method", 
"status_code"})) + receivedSamples = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_received_samples_total", + Help: "The total number of received samples, excluding rejected and deduped samples.", + }, []string{"user"}) + incomingSamples = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_samples_in_total", + Help: "The total number of samples that have come in to the distributor, including rejected or deduped samples.", + }, []string{"user"}) + nonHASamples = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_non_ha_samples_received_total", + Help: "The total number of received samples for a user that has HA tracking turned on, but the sample didn't contain both HA labels.", + }, []string{"user"}) + dedupedSamples = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_deduped_samples_total", + Help: "The total number of deduplicated samples.", + }, []string{"user", "cluster"}) + labelsHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "labels_per_sample", + Help: "Number of labels per sample.", + Buckets: []float64{5, 10, 15, 20, 25}, + }) + ingesterAppends = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_ingester_appends_total", + Help: "The total number of batch appends sent to ingesters.", + }, []string{"ingester"}) + ingesterAppendFailures = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_ingester_append_failures_total", + Help: "The total number of failed batch appends sent to ingesters.", + }, []string{"ingester"}) + ingesterQueries = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_ingester_queries_total", + Help: "The total number of queries sent to ingesters.", + }, []string{"ingester"}) + ingesterQueryFailures = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_ingester_query_failures_total", + Help: "The total number of failed queries sent to ingesters.", + }, []string{"ingester"}) + replicationFactor = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: "cortex", + Name: "distributor_replication_factor", + Help: "The configured replication factor.", + }) + emptyPreallocSeries = ingester_client.PreallocTimeseries{} +) + +// Distributor is a storage.SampleAppender and a client.Querier which +// forwards appends and queries to individual ingesters. +type Distributor struct { + cfg Config + ingestersRing ring.ReadRing + ingesterPool *ingester_client.Pool + limits *validation.Overrides + billingClient *billing.Client + + // The global rate limiter requires a distributors ring to count + // the number of healthy instances + distributorsRing *ring.Lifecycler + + // For handling HA replicas. + Replicas *haTracker + + // Per-user rate limiter. 
+ ingestionRateLimiter *limiter.RateLimiter +} + +// Config contains the configuration require to +// create a Distributor +type Config struct { + EnableBilling bool `yaml:"enable_billing,omitempty"` + BillingConfig billing.Config `yaml:"billing,omitempty"` + PoolConfig ingester_client.PoolConfig `yaml:"pool,omitempty"` + + HATrackerConfig HATrackerConfig `yaml:"ha_tracker,omitempty"` + + MaxRecvMsgSize int `yaml:"max_recv_msg_size"` + RemoteTimeout time.Duration `yaml:"remote_timeout,omitempty"` + ExtraQueryDelay time.Duration `yaml:"extra_queue_delay,omitempty"` + + ShardByAllLabels bool `yaml:"shard_by_all_labels,omitempty"` + + // Distributors ring + DistributorRing RingConfig `yaml:"ring,omitempty"` + + // for testing + ingesterClientFactory client.Factory `yaml:"-"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.BillingConfig.RegisterFlags(f) + cfg.PoolConfig.RegisterFlags(f) + cfg.HATrackerConfig.RegisterFlags(f) + cfg.DistributorRing.RegisterFlags(f) + + f.BoolVar(&cfg.EnableBilling, "distributor.enable-billing", false, "Report number of ingested samples to billing system.") + f.IntVar(&cfg.MaxRecvMsgSize, "distributor.max-recv-msg-size", 100<<20, "remote_write API max receive message size (bytes).") + f.DurationVar(&cfg.RemoteTimeout, "distributor.remote-timeout", 2*time.Second, "Timeout for downstream ingesters.") + f.DurationVar(&cfg.ExtraQueryDelay, "distributor.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.") + flagext.DeprecatedFlag(f, "distributor.limiter-reload-period", "DEPRECATED. No more required because the local limiter is reconfigured as soon as the overrides change.") + f.BoolVar(&cfg.ShardByAllLabels, "distributor.shard-by-all-labels", false, "Distribute samples based on all labels, as opposed to solely by user and metric name.") +} + +// Validate config and returns error on failure +func (cfg *Config) Validate() error { + return cfg.HATrackerConfig.Validate() +} + +// New constructs a new Distributor +func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Overrides, ingestersRing ring.ReadRing, canJoinDistributorsRing bool) (*Distributor, error) { + if cfg.ingesterClientFactory == nil { + cfg.ingesterClientFactory = func(addr string) (grpc_health_v1.HealthClient, error) { + return ingester_client.MakeIngesterClient(addr, clientConfig) + } + } + + var billingClient *billing.Client + if cfg.EnableBilling { + var err error + billingClient, err = billing.NewClient(cfg.BillingConfig) + if err != nil { + return nil, err + } + } + + replicationFactor.Set(float64(ingestersRing.ReplicationFactor())) + cfg.PoolConfig.RemoteTimeout = cfg.RemoteTimeout + + replicas, err := newClusterTracker(cfg.HATrackerConfig) + if err != nil { + return nil, err + } + + // Create the configured ingestion rate limit strategy (local or global). In case + // it's an internal dependency and can't join the distributors ring, we skip rate + // limiting. 
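+	// For example, when the distributor is embedded by another component and
+	// canJoinDistributorsRing is false, the infinite strategy below disables
+	// rate limiting entirely.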
+ var ingestionRateStrategy limiter.RateLimiterStrategy + var distributorsRing *ring.Lifecycler + + if !canJoinDistributorsRing { + ingestionRateStrategy = newInfiniteIngestionRateStrategy() + } else if limits.IngestionRateStrategy() == validation.GlobalIngestionRateStrategy { + distributorsRing, err = ring.NewLifecycler(cfg.DistributorRing.ToLifecyclerConfig(), nil, "distributor", ring.DistributorRingKey) + if err != nil { + return nil, err + } + + distributorsRing.Start() + + ingestionRateStrategy = newGlobalIngestionRateStrategy(limits, distributorsRing) + } else { + ingestionRateStrategy = newLocalIngestionRateStrategy(limits) + } + + d := &Distributor{ + cfg: cfg, + ingestersRing: ingestersRing, + ingesterPool: ingester_client.NewPool(cfg.PoolConfig, ingestersRing, cfg.ingesterClientFactory, util.Logger), + billingClient: billingClient, + distributorsRing: distributorsRing, + limits: limits, + ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), + Replicas: replicas, + } + + return d, nil +} + +// Stop stops the distributor's maintenance loop. +func (d *Distributor) Stop() { + d.ingesterPool.Stop() + d.Replicas.stop() + + if d.distributorsRing != nil { + d.distributorsRing.Shutdown() + } +} + +func (d *Distributor) tokenForLabels(userID string, labels []client.LabelAdapter) (uint32, error) { + if d.cfg.ShardByAllLabels { + return shardByAllLabels(userID, labels) + } + + metricName, err := extract.MetricNameFromLabelAdapters(labels) + if err != nil { + return 0, err + } + return shardByMetricName(userID, metricName), nil +} + +func shardByMetricName(userID string, metricName string) uint32 { + h := client.HashNew32() + h = client.HashAdd32(h, userID) + h = client.HashAdd32(h, metricName) + return h +} + +func shardByAllLabels(userID string, labels []client.LabelAdapter) (uint32, error) { + h := client.HashNew32() + h = client.HashAdd32(h, userID) + var lastLabelName string + for _, label := range labels { + if strings.Compare(lastLabelName, label.Name) >= 0 { + return 0, fmt.Errorf("Labels not sorted") + } + h = client.HashAdd32(h, label.Name) + h = client.HashAdd32(h, label.Value) + } + return h, nil +} + +// Remove the label labelname from a slice of LabelPairs if it exists. +func removeLabel(labelName string, labels *[]client.LabelAdapter) { + for i := 0; i < len(*labels); i++ { + pair := (*labels)[i] + if pair.Name == labelName { + *labels = append((*labels)[:i], (*labels)[i+1:]...) + return + } + } +} + +// Returns a boolean that indicates whether or not we want to remove the replica label going forward, +// and an error that indicates whether we want to accept samples based on the cluster/replica found in ts. +// nil for the error means accept the sample. +func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica string) (bool, error) { + // If the sample doesn't have either HA label, accept it. + // At the moment we want to accept these samples by default. + if cluster == "" || replica == "" { + return false, nil + } + + // At this point we know we have both HA labels, we should lookup + // the cluster/instance here to see if we want to accept this sample. + err := d.Replicas.checkReplica(ctx, userID, cluster, replica) + // checkReplica should only have returned an error if there was a real error talking to Consul, or if the replica labels don't match. + if err != nil { // Don't accept the sample. + return false, err + } + return true, nil +} + +// Validates a single series from a write request. 
Will remove labels if +// any are configured to be dropped for the user ID. +// Returns the validated series with it's labels/samples, and any error. +func (d *Distributor) validateSeries(ts ingester_client.PreallocTimeseries, userID string) (client.PreallocTimeseries, error) { + labelsHistogram.Observe(float64(len(ts.Labels))) + if err := validation.ValidateLabels(d.limits, userID, ts.Labels); err != nil { + return emptyPreallocSeries, err + } + + metricName, _ := extract.MetricNameFromLabelAdapters(ts.Labels) + samples := make([]client.Sample, 0, len(ts.Samples)) + for _, s := range ts.Samples { + if err := validation.ValidateSample(d.limits, userID, metricName, s); err != nil { + return emptyPreallocSeries, err + } + samples = append(samples, s) + } + + return client.PreallocTimeseries{ + TimeSeries: &client.TimeSeries{ + Labels: ts.Labels, + Samples: samples, + }, + }, + nil +} + +// Push implements client.IngesterServer +func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*client.WriteResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + var lastPartialErr error + removeReplica := false + + numSamples := 0 + for _, ts := range req.Timeseries { + numSamples += len(ts.Samples) + } + // Count the total samples in, prior to validation or deuplication, for comparison with other metrics. + incomingSamples.WithLabelValues(userID).Add(float64(numSamples)) + + if d.limits.AcceptHASamples(userID) && len(req.Timeseries) > 0 { + cluster, replica := findHALabels(d.limits.HAReplicaLabel(userID), d.limits.HAClusterLabel(userID), req.Timeseries[0].Labels) + removeReplica, err = d.checkSample(ctx, userID, cluster, replica) + if err != nil { + if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() == 202 { + // These samples have been deduped. + dedupedSamples.WithLabelValues(userID, cluster).Add(float64(numSamples)) + } + + // Ensure the request slice is reused if the series get deduped. + client.ReuseSlice(req.Timeseries) + + return nil, err + } + // If there wasn't an error but removeReplica is false that means we didn't find both HA labels. + if !removeReplica { + nonHASamples.WithLabelValues(userID).Add(float64(numSamples)) + } + } + + // For each timeseries, compute a hash to distribute across ingesters; + // check each sample and discard if outside limits. + validatedTimeseries := make([]client.PreallocTimeseries, 0, len(req.Timeseries)) + keys := make([]uint32, 0, len(req.Timeseries)) + validatedSamples := 0 + for _, ts := range req.Timeseries { + // If we found both the cluster and replica labels, we only want to include the cluster label when + // storing series in Cortex. If we kept the replica label we would end up with another series for the same + // series we're trying to dedupe when HA tracking moves over to a different replica. + if removeReplica { + removeLabel(d.limits.HAReplicaLabel(userID), &ts.Labels) + } + + for _, labelName := range d.limits.DropLabels(userID) { + removeLabel(labelName, &ts.Labels) + } + + if len(ts.Labels) == 0 { + continue + } + + // Generate the sharding token based on the series labels without the HA replica + // label and dropped labels (if any) + key, err := d.tokenForLabels(userID, ts.Labels) + if err != nil { + return nil, err + } + + validatedSeries, err := d.validateSeries(ts, userID) + + // Errors in validation are considered non-fatal, as one series in a request may contain + // invalid data but all the remaining series could be perfectly valid. 
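+		// Only the last such error is kept (lastPartialErr) and reported back
+		// together with the response once the whole request has been processed.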
+ if err != nil { + lastPartialErr = err + } + + // validateSeries would have returned an emptyPreallocSeries if there were no valid samples. + if validatedSeries == emptyPreallocSeries { + continue + } + + metricName, _ := extract.MetricNameFromLabelAdapters(ts.Labels) + samples := make([]client.Sample, 0, len(ts.Samples)) + for _, s := range ts.Samples { + if err := validation.ValidateSample(d.limits, userID, metricName, s); err != nil { + lastPartialErr = err + continue + } + samples = append(samples, s) + } + + keys = append(keys, key) + validatedTimeseries = append(validatedTimeseries, validatedSeries) + validatedSamples += len(ts.Samples) + } + receivedSamples.WithLabelValues(userID).Add(float64(validatedSamples)) + + if len(keys) == 0 { + // Ensure the request slice is reused if there's no series passing the validation. + client.ReuseSlice(req.Timeseries) + + return &client.WriteResponse{}, lastPartialErr + } + + now := time.Now() + if !d.ingestionRateLimiter.AllowN(now, userID, validatedSamples) { + // Ensure the request slice is reused if the request is rate limited. + client.ReuseSlice(req.Timeseries) + + // Return a 4xx here to have the client discard the data and not retry. If a client + // is sending too much data consistently we will unlikely ever catch up otherwise. + validation.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamples)) + return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples", d.ingestionRateLimiter.Limit(now, userID), numSamples) + } + + err = ring.DoBatch(ctx, d.ingestersRing, keys, func(ingester ring.IngesterDesc, indexes []int) error { + timeseries := make([]client.PreallocTimeseries, 0, len(indexes)) + for _, i := range indexes { + timeseries = append(timeseries, validatedTimeseries[i]) + } + + // Use a background context to make sure all ingesters get samples even if we return early + localCtx, cancel := context.WithTimeout(context.Background(), d.cfg.RemoteTimeout) + defer cancel() + localCtx = user.InjectOrgID(localCtx, userID) + if sp := opentracing.SpanFromContext(ctx); sp != nil { + localCtx = opentracing.ContextWithSpan(localCtx, sp) + } + return d.sendSamples(localCtx, ingester, timeseries) + }, func() { client.ReuseSlice(req.Timeseries) }) + if err != nil { + return nil, err + } + return &client.WriteResponse{}, lastPartialErr +} + +func (d *Distributor) sendSamples(ctx context.Context, ingester ring.IngesterDesc, timeseries []client.PreallocTimeseries) error { + h, err := d.ingesterPool.GetClientFor(ingester.Addr) + if err != nil { + return err + } + c := h.(ingester_client.IngesterClient) + + req := client.WriteRequest{ + Timeseries: timeseries, + } + _, err = c.Push(ctx, &req) + + ingesterAppends.WithLabelValues(ingester.Addr).Inc() + if err != nil { + ingesterAppendFailures.WithLabelValues(ingester.Addr).Inc() + } + return err +} + +// forAllIngesters runs f, in parallel, for all ingesters +func (d *Distributor) forAllIngesters(ctx context.Context, reallyAll bool, f func(client.IngesterClient) (interface{}, error)) ([]interface{}, error) { + replicationSet, err := d.ingestersRing.GetAll() + if err != nil { + return nil, err + } + if reallyAll { + replicationSet.MaxErrors = 0 + } + + return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ing *ring.IngesterDesc) (interface{}, error) { + client, err := d.ingesterPool.GetClientFor(ing.Addr) + if err != nil { + return nil, err + } + + return f(client.(ingester_client.IngesterClient)) + }) +} + 
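+// The read-path methods below fan out to the ingesters via forAllIngesters
+// and merge the per-ingester responses.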
+// LabelValuesForLabelName returns all of the label values that are associated with a given label name. +func (d *Distributor) LabelValuesForLabelName(ctx context.Context, labelName model.LabelName) ([]string, error) { + req := &client.LabelValuesRequest{ + LabelName: string(labelName), + } + resps, err := d.forAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { + return client.LabelValues(ctx, req) + }) + if err != nil { + return nil, err + } + + valueSet := map[string]struct{}{} + for _, resp := range resps { + for _, v := range resp.(*client.LabelValuesResponse).LabelValues { + valueSet[v] = struct{}{} + } + } + + values := make([]string, 0, len(valueSet)) + for v := range valueSet { + values = append(values, v) + } + return values, nil +} + +// LabelNames returns all of the label names. +func (d *Distributor) LabelNames(ctx context.Context) ([]string, error) { + req := &client.LabelNamesRequest{} + resps, err := d.forAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { + return client.LabelNames(ctx, req) + }) + if err != nil { + return nil, err + } + + valueSet := map[string]struct{}{} + for _, resp := range resps { + for _, v := range resp.(*client.LabelNamesResponse).LabelNames { + valueSet[v] = struct{}{} + } + } + + values := make([]string, 0, len(valueSet)) + for v := range valueSet { + values = append(values, v) + } + sort.Slice(values, func(i, j int) bool { + return values[i] < values[j] + }) + + return values, nil +} + +// MetricsForLabelMatchers gets the metrics that match said matchers +func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) { + req, err := ingester_client.ToMetricsForLabelMatchersRequest(from, through, matchers) + if err != nil { + return nil, err + } + + resps, err := d.forAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { + return client.MetricsForLabelMatchers(ctx, req) + }) + if err != nil { + return nil, err + } + + metrics := map[model.Fingerprint]model.Metric{} + for _, resp := range resps { + ms := ingester_client.FromMetricsForLabelMatchersResponse(resp.(*client.MetricsForLabelMatchersResponse)) + for _, m := range ms { + metrics[m.Fingerprint()] = m + } + } + + result := make([]metric.Metric, 0, len(metrics)) + for _, m := range metrics { + result = append(result, metric.Metric{ + Metric: m, + }) + } + return result, nil +} + +// UserStats returns statistics about the current user. +func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) { + req := &client.UserStatsRequest{} + resps, err := d.forAllIngesters(ctx, true, func(client client.IngesterClient) (interface{}, error) { + return client.UserStats(ctx, req) + }) + if err != nil { + return nil, err + } + + totalStats := &UserStats{} + for _, resp := range resps { + r := resp.(*client.UserStatsResponse) + totalStats.IngestionRate += r.IngestionRate + totalStats.APIIngestionRate += r.ApiIngestionRate + totalStats.RuleIngestionRate += r.RuleIngestionRate + totalStats.NumSeries += r.NumSeries + } + + totalStats.IngestionRate /= float64(d.ingestersRing.ReplicationFactor()) + totalStats.NumSeries /= uint64(d.ingestersRing.ReplicationFactor()) + + return totalStats, nil +} + +// UserIDStats models ingestion statistics for one user, including the user ID +type UserIDStats struct { + UserID string `json:"userID"` + UserStats +} + +// AllUserStats returns statistics about all users. 
+// Note it does not divide by the ReplicationFactor like UserStats() does.
+func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) {
+	// Add up by user, across all responses from ingesters.
+	perUserTotals := make(map[string]UserStats)
+
+	req := &client.UserStatsRequest{}
+	ctx = user.InjectOrgID(ctx, "1") // fake: ingester insists on having an org ID
+	// Not using d.forAllIngesters(), so we can fail after the first error.
+	replicationSet, err := d.ingestersRing.GetAll()
+	if err != nil {
+		return nil, err
+	}
+	for _, ingester := range replicationSet.Ingesters {
+		client, err := d.ingesterPool.GetClientFor(ingester.Addr)
+		if err != nil {
+			return nil, err
+		}
+		resp, err := client.(ingester_client.IngesterClient).AllUserStats(ctx, req)
+		if err != nil {
+			return nil, err
+		}
+		for _, u := range resp.Stats {
+			s := perUserTotals[u.UserId]
+			s.IngestionRate += u.Data.IngestionRate
+			s.APIIngestionRate += u.Data.ApiIngestionRate
+			s.RuleIngestionRate += u.Data.RuleIngestionRate
+			s.NumSeries += u.Data.NumSeries
+			perUserTotals[u.UserId] = s
+		}
+	}
+
+	// Turn the aggregated map into a slice for return.
+	response := make([]UserIDStats, 0, len(perUserTotals))
+	for id, stats := range perUserTotals {
+		response = append(response, UserIDStats{
+			UserID: id,
+			UserStats: UserStats{
+				IngestionRate:     stats.IngestionRate,
+				APIIngestionRate:  stats.APIIngestionRate,
+				RuleIngestionRate: stats.RuleIngestionRate,
+				NumSeries:         stats.NumSeries,
+			},
+		})
+	}
+
+	return response, nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go
new file mode 100644
index 000000000000..673da46e7e38
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go
@@ -0,0 +1,87 @@
+package distributor
+
+import (
+	"flag"
+	"os"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/ring"
+	"github.com/cortexproject/cortex/pkg/ring/kv"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/flagext"
+	"github.com/go-kit/kit/log/level"
+)
+
+// RingConfig masks the ring lifecycler config, which contains
+// many options not really required by the distributor's ring. This config
+// is used to strip the config down to the minimum and to avoid confusing
+// the user.
+type RingConfig struct { + KVStore kv.Config `yaml:"kvstore,omitempty"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period,omitempty"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout,omitempty"` + + // Instance details + InstanceID string `yaml:"instance_id"` + InstanceInterfaceNames []string `yaml:"instance_interface_names"` + InstancePort int `yaml:"instance_port"` + InstanceAddr string `yaml:"instance_addr"` + + // Injected internally + ListenPort int `yaml:"-"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { + hostname, err := os.Hostname() + if err != nil { + level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + os.Exit(1) + } + + // Ring flags + cfg.KVStore.RegisterFlagsWithPrefix("distributor.ring.", f) + f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, "distributor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which distributors are considered unhealthy within the ring.") + + // Instance flags + cfg.InstanceInterfaceNames = []string{"eth0", "en0"} + f.Var((*flagext.Strings)(&cfg.InstanceInterfaceNames), "distributor.ring.instance-interface", "Name of network interface to read address from.") + f.StringVar(&cfg.InstanceAddr, "distributor.ring.instance-addr", "", "IP address to advertise in the ring.") + f.IntVar(&cfg.InstancePort, "distributor.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") + f.StringVar(&cfg.InstanceID, "distributor.ring.instance-id", hostname, "Instance ID to register in the ring.") +} + +// ToLifecyclerConfig returns a LifecyclerConfig based on the distributor +// ring config. 
+func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig {
+	// We have to make sure that the ring.LifecyclerConfig and ring.Config
+	// defaults are preserved.
+	lc := ring.LifecyclerConfig{}
+	rc := ring.Config{}
+
+	flagext.DefaultValues(&lc)
+	flagext.DefaultValues(&rc)
+
+	// Configure ring
+	rc.KVStore = cfg.KVStore
+	rc.HeartbeatTimeout = cfg.HeartbeatTimeout
+	rc.ReplicationFactor = 1
+
+	// Configure lifecycler
+	lc.RingConfig = rc
+	lc.ListenPort = &cfg.ListenPort
+	lc.Addr = cfg.InstanceAddr
+	lc.Port = cfg.InstancePort
+	lc.ID = cfg.InstanceID
+	lc.InfNames = cfg.InstanceInterfaceNames
+	lc.SkipUnregister = false
+	lc.HeartbeatPeriod = cfg.HeartbeatPeriod
+	lc.ObservePeriod = 0
+	lc.NumTokens = 1
+	lc.JoinAfter = 0
+	lc.MinReadyDuration = 0
+	lc.FinalSleep = 0
+
+	return lc
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go
new file mode 100644
index 000000000000..cba42bab8f7c
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go
@@ -0,0 +1,286 @@
+package distributor
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/weaveworks/common/httpgrpc"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/weaveworks/common/mtime"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/ring/kv"
+	"github.com/cortexproject/cortex/pkg/ring/kv/codec"
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+var (
+	electedReplicaChanges = promauto.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "cortex",
+		Name:      "ha_tracker_elected_replica_changes_total",
+		Help:      "The total number of times the elected replica has changed for a user ID/cluster.",
+	}, []string{"user", "cluster"})
+	electedReplicaTimestamp = promauto.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: "cortex",
+		Name:      "ha_tracker_elected_replica_timestamp_seconds",
+		Help:      "The timestamp stored for the currently elected replica, from the KVStore.",
+	}, []string{"user", "cluster"})
+	electedReplicaPropagationTime = promauto.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "cortex",
+		Name:      "ha_tracker_elected_replica_change_propagation_time_seconds",
+		Help:      "The time it takes for the distributor to update the replica change.",
+		Buckets:   prometheus.DefBuckets,
+	})
+	kvCASCalls = promauto.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "cortex",
+		Name:      "ha_tracker_kv_store_cas_total",
+		Help:      "The total number of CAS calls to the KV store for a user ID/cluster.",
+	}, []string{"user", "cluster"})
+
+	errNegativeUpdateTimeoutJitterMax = errors.New("HA tracker max update timeout jitter shouldn't be negative")
+	errInvalidFailoverTimeout         = "HA Tracker failover timeout (%v) must be at least 1s greater than update timeout + max jitter (%v)"
+)
+
+// ProtoReplicaDescFactory makes new ReplicaDescs.
+func ProtoReplicaDescFactory() proto.Message {
+	return NewReplicaDesc()
+}
+
+// NewReplicaDesc returns an empty *distributor.ReplicaDesc.
+func NewReplicaDesc() *ReplicaDesc {
+	return &ReplicaDesc{}
+}
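ProtoReplicaDescFactory is handed to the KV codec so the store can allocate fresh ReplicaDesc values while decoding. A rough sketch of that round-trip, using the generated Marshal/Unmarshal methods from ha_tracker.pb.go further below (the function name and values are illustrative, error handling elided):

func exampleRoundTrip() {
	desc := &ReplicaDesc{Replica: "replica-1", ReceivedAt: 1578477716000} // millis
	buf, _ := desc.Marshal()              // gogo-generated marshaling
	out := ProtoReplicaDescFactory()      // allocates an empty *ReplicaDesc
	_ = out.(*ReplicaDesc).Unmarshal(buf) // restores Replica and ReceivedAt
}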
+// haTracker tracks the replica we're accepting samples from
+// for each HA cluster we know about.
+type haTracker struct {
+	logger              log.Logger
+	cfg                 HATrackerConfig
+	client              kv.Client
+	updateTimeoutJitter time.Duration
+
+	// Replicas we are accepting samples from.
+	electedLock sync.RWMutex
+	elected     map[string]ReplicaDesc
+	done        chan struct{}
+	cancel      context.CancelFunc
+}
+
+// HATrackerConfig contains the configuration required to
+// create an HA Tracker.
+type HATrackerConfig struct {
+	EnableHATracker bool `yaml:"enable_ha_tracker,omitempty"`
+	// We should only update the timestamp if the difference
+	// between the stored timestamp and the time we received a sample at
+	// is more than this duration.
+	UpdateTimeout          time.Duration `yaml:"ha_tracker_update_timeout"`
+	UpdateTimeoutJitterMax time.Duration `yaml:"ha_tracker_update_timeout_jitter_max"`
+	// We should only failover to accepting samples from a replica
+	// other than the replica written in the KVStore if the difference
+	// between the stored timestamp and the time we received a sample is
+	// more than this duration.
+	FailoverTimeout time.Duration `yaml:"ha_tracker_failover_timeout"`
+
+	KVStore kv.Config
+}
+
+// RegisterFlags adds the flags required to config this to the given FlagSet.
+func (cfg *HATrackerConfig) RegisterFlags(f *flag.FlagSet) {
+	f.BoolVar(&cfg.EnableHATracker,
+		"distributor.ha-tracker.enable",
+		false,
+		"Enable the distributor's HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels).")
+	f.DurationVar(&cfg.UpdateTimeout,
+		"distributor.ha-tracker.update-timeout",
+		15*time.Second,
+		"Update the timestamp in the KV store for a given cluster/replica only after this amount of time has passed since the current stored timestamp.")
+	f.DurationVar(&cfg.UpdateTimeoutJitterMax,
+		"distributor.ha-tracker.update-timeout-jitter-max",
+		5*time.Second,
+		"Maximum jitter applied to the update timeout, to spread the HA deduping heartbeats out over time.")
+	f.DurationVar(&cfg.FailoverTimeout,
+		"distributor.ha-tracker.failover-timeout",
+		30*time.Second,
+		"If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from. This value must be greater than the update timeout.")
+	// We want the ability to use different Consul instances for the ring and for HA cluster tracking.
+	cfg.KVStore.RegisterFlagsWithPrefix("distributor.ha-tracker.", f)
+}
+
+// Validate validates the config and returns an error on failure.
+func (cfg *HATrackerConfig) Validate() error {
+	if cfg.UpdateTimeoutJitterMax < 0 {
+		return errNegativeUpdateTimeoutJitterMax
+	}
+
+	minFailureTimeout := cfg.UpdateTimeout + cfg.UpdateTimeoutJitterMax + time.Second
+	if cfg.FailoverTimeout < minFailureTimeout {
+		return fmt.Errorf(errInvalidFailoverTimeout, cfg.FailoverTimeout, minFailureTimeout)
+	}
+
+	return nil
+}
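Validate enforces FailoverTimeout >= UpdateTimeout + UpdateTimeoutJitterMax + 1s. A small sketch of a config that fails this check (the function name and values are illustrative, not part of the patch):

func exampleValidate() error {
	cfg := HATrackerConfig{
		EnableHATracker:        true,
		UpdateTimeout:          15 * time.Second,
		UpdateTimeoutJitterMax: 5 * time.Second,
		FailoverTimeout:        20 * time.Second, // below the 21s minimum
	}
	return cfg.Validate() // non-nil: 20s < 15s + 5s + 1s
}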
+// newClusterTracker returns a new HA cluster tracker using either Consul
+// or an in-memory KV store.
+func newClusterTracker(cfg HATrackerConfig) (*haTracker, error) {
+	codec := codec.Proto{Factory: ProtoReplicaDescFactory}
+
+	var jitter time.Duration
+	if cfg.UpdateTimeoutJitterMax > 0 {
+		jitter = time.Duration(rand.Int63n(int64(2*cfg.UpdateTimeoutJitterMax))) - cfg.UpdateTimeoutJitterMax
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t := haTracker{
+		logger:              util.Logger,
+		cfg:                 cfg,
+		updateTimeoutJitter: jitter,
+		done:                make(chan struct{}),
+		elected:             map[string]ReplicaDesc{},
+		cancel:              cancel,
+	}
+
+	if cfg.EnableHATracker {
+		client, err := kv.NewClient(cfg.KVStore, codec)
+		if err != nil {
+			return nil, err
+		}
+		t.client = client
+		go t.loop(ctx)
+	}
+	return &t, nil
+}
+
+// Follows the pattern used by the ring for WatchKey.
+func (c *haTracker) loop(ctx context.Context) {
+	defer close(c.done)
+	// The KVStore config we gave when creating c should have contained a prefix,
+	// which would have given us a prefixed KVStore client. So, we can pass an empty string here.
+	c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool {
+		replica := value.(*ReplicaDesc)
+		c.electedLock.Lock()
+		defer c.electedLock.Unlock()
+		chunks := strings.SplitN(key, "/", 2)
+
+		// The prefix has already been stripped, so a valid key would look like cluster/replica,
+		// and a key without a / such as `ring` would be invalid.
+		if len(chunks) != 2 {
+			return true
+		}
+
+		if replica.Replica != c.elected[key].Replica {
+			electedReplicaChanges.WithLabelValues(chunks[0], chunks[1]).Inc()
+		}
+		c.elected[key] = *replica
+		electedReplicaTimestamp.WithLabelValues(chunks[0], chunks[1]).Set(float64(replica.ReceivedAt / 1000))
+		electedReplicaPropagationTime.Observe(time.Since(timestamp.Time(replica.ReceivedAt)).Seconds())
+		return true
+	})
+}
+
+// stop calls the tracker's cancel function, which will end the loop in WatchPrefix.
+func (c *haTracker) stop() {
+	if c.cfg.EnableHATracker {
+		c.cancel()
+		<-c.done
+	}
+}
+// checkReplica checks the cluster and replica against the backing KVStore and local cache in the
+// tracker c to see if we should accept the incoming sample. It will return an error if the sample
+// should not be accepted. Note that internally this function does checks against the stored values
+// and may modify the stored data, for example to failover between replicas after a certain period of time.
+// A 202 response code is returned (from checkKVStore) if we shouldn't store this sample but are
+// accepting samples from another replica for the cluster, so that a stream of errors isn't returned
+// to customers' clients.
+func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica string) error {
+	// If HA tracking isn't enabled then accept the sample.
+	if !c.cfg.EnableHATracker {
+		return nil
+	}
+	key := fmt.Sprintf("%s/%s", userID, cluster)
+	now := mtime.Now()
+	c.electedLock.RLock()
+	entry, ok := c.elected[key]
+	c.electedLock.RUnlock()
+	if ok && now.Sub(timestamp.Time(entry.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
+		if entry.Replica != replica {
+			return replicasNotMatchError(replica, entry.Replica)
+		}
+		return nil
+	}
+
+	err := c.checkKVStore(ctx, key, replica, now)
+	kvCASCalls.WithLabelValues(userID, cluster).Inc()
+	if err != nil {
+		// The callback within checkKVStore will return a 202 if the sample is being deduped,
+		// otherwise there may have been an actual error CAS'ing that we should log.
+		if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() != 202 {
+			level.Error(util.Logger).Log("msg", "rejecting sample", "error", err)
+		}
+	}
+	return err
+}
+
+func (c *haTracker) checkKVStore(ctx context.Context, key, replica string, now time.Time) error {
+	return c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
+		if desc, ok := in.(*ReplicaDesc); ok {
+
+			// We don't need to CAS and update the timestamp in the KV store if the timestamp we've received
+			// this sample at is less than updateTimeout amount of time since the timestamp in the KV store.
+			if desc.Replica == replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
+				return nil, false, nil
+			}
+
+			// We shouldn't failover to accepting a new replica if the timestamp we've received this sample at
+			// is less than failover timeout amount of time since the timestamp in the KV store.
+			if desc.Replica != replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.FailoverTimeout {
+				// Return a 202.
+				return nil, false, replicasNotMatchError(replica, desc.Replica)
+			}
+		}
+
+		// There was either invalid or no data for the key, so we now accept samples
+		// from this replica. Invalid could mean that the timestamp in the KV store was
+		// out of date based on the update and failover timeouts when compared to now.
+		return &ReplicaDesc{
+			Replica: replica, ReceivedAt: timestamp.FromTime(now),
+		}, true, nil
+	})
+}
+
+func replicasNotMatchError(replica, elected string) error {
+	return httpgrpc.Errorf(http.StatusAccepted, "replicas did not match, rejecting sample: replica=%s, elected=%s", replica, elected)
+}
+
+// findHALabels scans the given labels for the replica and cluster labels and
+// returns their values (an empty string is returned for a label that is not
+// present). It does not modify the labels parameter.
+func findHALabels(replicaLabel, clusterLabel string, labels []client.LabelAdapter) (string, string) {
+	var cluster, replica string
+	var pair client.LabelAdapter
+
+	for _, pair = range labels {
+		if pair.Name == replicaLabel {
+			replica = string(pair.Value)
+		}
+		if pair.Name == clusterLabel {
+			cluster = string(pair.Value)
+		}
+	}
+
+	return cluster, replica
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.pb.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.pb.go
new file mode 100644
index 000000000000..aa241c2c53c4
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.pb.go
@@ -0,0 +1,446 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ha_tracker.proto
+
+package distributor
+
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ReplicaDesc struct { + Replica string `protobuf:"bytes,1,opt,name=replica,proto3" json:"replica,omitempty"` + ReceivedAt int64 `protobuf:"varint,2,opt,name=receivedAt,proto3" json:"receivedAt,omitempty"` +} + +func (m *ReplicaDesc) Reset() { *m = ReplicaDesc{} } +func (*ReplicaDesc) ProtoMessage() {} +func (*ReplicaDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_86f0e7bcf71d860b, []int{0} +} +func (m *ReplicaDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReplicaDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReplicaDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaDesc.Merge(m, src) +} +func (m *ReplicaDesc) XXX_Size() int { + return m.Size() +} +func (m *ReplicaDesc) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaDesc proto.InternalMessageInfo + +func (m *ReplicaDesc) GetReplica() string { + if m != nil { + return m.Replica + } + return "" +} + +func (m *ReplicaDesc) GetReceivedAt() int64 { + if m != nil { + return m.ReceivedAt + } + return 0 +} + +func init() { + proto.RegisterType((*ReplicaDesc)(nil), "distributor.ReplicaDesc") +} + +func init() { proto.RegisterFile("ha_tracker.proto", fileDescriptor_86f0e7bcf71d860b) } + +var fileDescriptor_86f0e7bcf71d860b = []byte{ + // 201 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0x8c, 0x2f, + 0x29, 0x4a, 0x4c, 0xce, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0xc9, + 0x2c, 0x2e, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0xc9, 0x2f, 0x92, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, + 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0xab, 0x49, 0x2a, 0x4d, + 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x57, 0xc9, 0x9d, 0x8b, 0x3b, 0x28, 0xb5, 0x20, 0x27, + 0x33, 0x39, 0xd1, 0x25, 0xb5, 0x38, 0x59, 0x48, 0x82, 0x8b, 0xbd, 0x08, 0xc2, 0x95, 0x60, 0x54, + 0x60, 0xd4, 0xe0, 0x0c, 0x82, 0x71, 0x85, 0xe4, 0xb8, 0xb8, 0x8a, 0x52, 0x93, 0x53, 0x33, 0xcb, + 0x52, 0x53, 0x1c, 0x4b, 0x24, 0x98, 0x14, 0x18, 0x35, 0x98, 0x83, 0x90, 0x44, 0x9c, 0x4c, 0x2e, + 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, + 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, + 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, + 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0x5d, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0xa4, 0xe2, 0xd2, 0xff, 0xd5, 0x00, 0x00, 0x00, +} + +func (this *ReplicaDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ReplicaDesc) + if !ok { + that2, ok := that.(ReplicaDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Replica != that1.Replica { + return false + } + if this.ReceivedAt != that1.ReceivedAt { + return false + } + return true +} +func (this *ReplicaDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&distributor.ReplicaDesc{") + s = append(s, 
"Replica: "+fmt.Sprintf("%#v", this.Replica)+",\n") + s = append(s, "ReceivedAt: "+fmt.Sprintf("%#v", this.ReceivedAt)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringHaTracker(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ReplicaDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaDesc) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Replica) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintHaTracker(dAtA, i, uint64(len(m.Replica))) + i += copy(dAtA[i:], m.Replica) + } + if m.ReceivedAt != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintHaTracker(dAtA, i, uint64(m.ReceivedAt)) + } + return i, nil +} + +func encodeVarintHaTracker(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ReplicaDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Replica) + if l > 0 { + n += 1 + l + sovHaTracker(uint64(l)) + } + if m.ReceivedAt != 0 { + n += 1 + sovHaTracker(uint64(m.ReceivedAt)) + } + return n +} + +func sovHaTracker(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozHaTracker(x uint64) (n int) { + return sovHaTracker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ReplicaDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaDesc{`, + `Replica:` + fmt.Sprintf("%v", this.Replica) + `,`, + `ReceivedAt:` + fmt.Sprintf("%v", this.ReceivedAt) + `,`, + `}`, + }, "") + return s +} +func valueToStringHaTracker(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ReplicaDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHaTracker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Replica", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHaTracker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHaTracker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHaTracker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Replica = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field ReceivedAt", wireType) + } + m.ReceivedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHaTracker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReceivedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipHaTracker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHaTracker + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHaTracker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHaTracker(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHaTracker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHaTracker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHaTracker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthHaTracker + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthHaTracker + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHaTracker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipHaTracker(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthHaTracker + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthHaTracker = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHaTracker = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.proto b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.proto new file mode 100644 index 000000000000..a4b4faa6b22e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package distributor; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +message ReplicaDesc { + string replica = 1; + int64 receivedAt = 2; +} \ No newline at end of file diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker_http.go 
b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker_http.go
new file mode 100644
index 000000000000..68146b4224e8
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker_http.go
@@ -0,0 +1,99 @@
+package distributor
+
+import (
+	"html/template"
+	"net/http"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/prometheus/prometheus/pkg/timestamp"
+)
+
+const trackerTpl = `
+<!DOCTYPE html>
+<html>
+	<head>
+		<meta charset="UTF-8">
+		<title>Cortex HA Tracker Status</title>
+	</head>
+	<body>
+		<h1>Cortex HA Tracker Status</h1>
+		<p>Current time: {{ .Now }}</p>
+		<table border="1">
+			<thead>
+				<tr>
+					<th>User ID</th>
+					<th>Cluster</th>
+					<th>Replica</th>
+					<th>Elected Time</th>
+					<th>Time Until Update</th>
+					<th>Time Until Failover</th>
+				</tr>
+			</thead>
+			<tbody>
+				{{ range .Elected }}
+				<tr>
+					<td>{{ .UserID }}</td>
+					<td>{{ .Cluster }}</td>
+					<td>{{ .Replica }}</td>
+					<td>{{ .ElectedAt }}</td>
+					<td>{{ .UpdateTime }}</td>
+					<td>{{ .FailoverTime }}</td>
+				</tr>
+				{{ end }}
+			</tbody>
+		</table>
+	</body>
+</html>`
+
+var trackerTmpl *template.Template
+
+func init() {
+	trackerTmpl = template.Must(template.New("ha-tracker").Parse(trackerTpl))
+}
+
+func (h *haTracker) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	h.electedLock.RLock()
+	type replica struct {
+		UserID, Cluster, Replica string
+		ElectedAt                time.Time
+		UpdateTime, FailoverTime time.Duration
+	}
+
+	electedReplicas := []replica{}
+	for key, desc := range h.elected {
+		chunks := strings.SplitN(key, "/", 2)
+
+		electedReplicas = append(electedReplicas, replica{
+			UserID:       chunks[0],
+			Cluster:      chunks[1],
+			Replica:      desc.Replica,
+			ElectedAt:    timestamp.Time(desc.ReceivedAt),
+			UpdateTime:   time.Until(timestamp.Time(desc.ReceivedAt).Add(h.cfg.UpdateTimeout)),
+			FailoverTime: time.Until(timestamp.Time(desc.ReceivedAt).Add(h.cfg.FailoverTimeout)),
+		})
+	}
+	h.electedLock.RUnlock()
+
+	sort.Slice(electedReplicas, func(i, j int) bool {
+		first := electedReplicas[i]
+		second := electedReplicas[j]
+
+		if first.UserID != second.UserID {
+			return first.UserID < second.UserID
+		}
+		return first.Cluster < second.Cluster
+	})
+
+	if err := trackerTmpl.Execute(w, struct {
+		Elected []replica
+		Now     time.Time
+	}{
+		Elected: electedReplicas,
+		Now:     time.Now(),
+	}); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
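Since haTracker implements http.Handler via the ServeHTTP method above, the status page can be mounted on any mux from within the distributor package (both names are unexported). A hedged sketch; the function name and URL path are illustrative, not from this patch:

func mountTrackerStatusPage(cfg HATrackerConfig) error {
	tracker, err := newClusterTracker(cfg)
	if err != nil {
		return err
	}
	http.Handle("/ha-tracker", tracker) // serves the elected-replicas table above
	return nil
}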

diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/http_admin.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/http_admin.go
new file mode 100644
index 000000000000..18bbe1a1421a
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/http_admin.go
@@ -0,0 +1,98 @@
+package distributor
+
+import (
+	"encoding/json"
+	"fmt"
+	"html/template"
+	"net/http"
+	"sort"
+	"strings"
+	"time"
+)
+
+const tpl = `
+<!DOCTYPE html>
+<html>
+	<head>
+		<meta charset="UTF-8">
+		<title>Cortex Ingester Stats</title>
+	</head>
+	<body>
+		<h1>Cortex Ingester Stats</h1>
+		<p>Current time: {{ .Now }}</p>
+		<p><b>NB stats do not account for replication factor, which is currently set to {{ .ReplicationFactor }}</b></p>
+		<table border="1">
+			<thead>
+				<tr>
+					<th>User</th>
+					<th># Series</th>
+					<th>Total Ingest Rate</th>
+					<th>API Ingest Rate</th>
+					<th>Rule Ingest Rate</th>
+				</tr>
+			</thead>
+			<tbody>
+				{{ range .Stats }}
+				<tr>
+					<td>{{ .UserID }}</td>
+					<td>{{ .UserStats.NumSeries }}</td>
+					<td>{{ printf "%.2f" .UserStats.IngestionRate }}</td>
+					<td>{{ printf "%.2f" .UserStats.APIIngestionRate }}</td>
+					<td>{{ printf "%.2f" .UserStats.RuleIngestionRate }}</td>
+				</tr>
+				{{ end }}
+			</tbody>
+		</table>
+	</body>
+</html>`
+
+var tmpl *template.Template
+
+func init() {
+	tmpl = template.Must(template.New("webpage").Parse(tpl))
+}
+
+type userStatsByTimeseries []UserIDStats
+
+func (s userStatsByTimeseries) Len() int      { return len(s) }
+func (s userStatsByTimeseries) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s userStatsByTimeseries) Less(i, j int) bool {
+	return s[i].NumSeries > s[j].NumSeries ||
+		(s[i].NumSeries == s[j].NumSeries && s[i].UserID < s[j].UserID)
+}
+
+// AllUserStatsHandler shows stats for all users.
+func (d *Distributor) AllUserStatsHandler(w http.ResponseWriter, r *http.Request) {
+	stats, err := d.AllUserStats(r.Context())
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	sort.Sort(userStatsByTimeseries(stats))
+
+	if encodings, found := r.Header["Accept"]; found &&
+		len(encodings) > 0 && strings.Contains(encodings[0], "json") {
+		if err := json.NewEncoder(w).Encode(stats); err != nil {
+			http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError)
+		}
+		return
+	}
+
+	if err := tmpl.Execute(w, struct {
+		Now               time.Time
+		Stats             []UserIDStats
+		ReplicationFactor int
+	}{
+		Now:               time.Now(),
+		Stats:             stats,
+		ReplicationFactor: d.ingestersRing.ReplicationFactor(),
+	}); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/http_server.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/http_server.go
new file mode 100644
index 000000000000..a0c23836544a
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/http_server.go
@@ -0,0 +1,108 @@
+package distributor
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/go-kit/kit/log/level"
+	"github.com/prometheus/prometheus/promql"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/weaveworks/common/httpgrpc"
+)
+
+// PushHandler is a http.Handler which accepts WriteRequests.
+func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) {
+	compressionType := util.CompressionTypeFor(r.Header.Get("X-Prometheus-Remote-Write-Version"))
+	var req client.PreallocWriteRequest
+	req.Source = client.API
+	buf, err := util.ParseProtoReader(r.Context(), r.Body, int(r.ContentLength), d.cfg.MaxRecvMsgSize, &req, compressionType)
+	logger := util.WithContext(r.Context(), util.Logger)
+	if err != nil {
+		level.Error(logger).Log("err", err.Error())
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	if d.cfg.EnableBilling {
+		var samples int64
+		for _, ts := range req.Timeseries {
+			samples += int64(len(ts.Samples))
+		}
+		if err := d.emitBillingRecord(r.Context(), buf, samples); err != nil {
+			level.Error(logger).Log("msg", "error emitting billing record", "err", err)
+		}
+	}
+
+	if _, err := d.Push(r.Context(), &req.WriteRequest); err != nil {
+		resp, ok := httpgrpc.HTTPResponseFromError(err)
+		if !ok {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+		if resp.GetCode() != 202 {
+			level.Error(logger).Log("msg", "push error", "err", err)
+		}
+		http.Error(w, string(resp.Body), int(resp.Code))
+	}
+}
+
+// UserStats models ingestion statistics for one user.
+type UserStats struct {
+	IngestionRate     float64 `json:"ingestionRate"`
+	NumSeries         uint64  `json:"numSeries"`
+	APIIngestionRate  float64 `json:"APIIngestionRate"`
+	RuleIngestionRate float64 `json:"RuleIngestionRate"`
+}
+
+// UserStatsHandler serves ingestion statistics for the user issuing the request.
+func (d *Distributor) UserStatsHandler(w http.ResponseWriter, r *http.Request) {
+	stats, err := d.UserStats(r.Context())
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	util.WriteJSONResponse(w, stats)
+}
+
+// ValidateExprHandler validates a PromQL expression.
+func (d *Distributor) ValidateExprHandler(w http.ResponseWriter, r *http.Request) {
+	_, err := promql.ParseExpr(r.FormValue("expr"))
+
+	// We mimic the response format of Prometheus's official API here for
+	// consistency, but unfortunately its private types (string consts etc.)
+	// aren't reusable.
+	if err == nil {
+		util.WriteJSONResponse(w, map[string]string{
+			"status": "success",
+		})
+		return
+	}
+
+	parseErr, ok := err.(*promql.ParseErr)
+	if !ok {
+		// This should always be a promql.ParseErr.
+		http.Error(w, fmt.Sprintf("unexpected error returned from PromQL parser: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	// If the parsing input was a single line, parseErr.Line is 0
+	// and the generated error string omits the line entirely. But we
+	// want to report line numbers consistently, no matter how many
+	// lines there are (starting at 1).
+	if parseErr.Line == 0 {
+		parseErr.Line = 1
+	}
+	w.WriteHeader(http.StatusBadRequest)
+	util.WriteJSONResponse(w, map[string]interface{}{
+		"status":    "error",
+		"errorType": "bad_data",
+		"error":     err.Error(),
+		"location": map[string]int{
+			"line": parseErr.Line,
+			"pos":  parseErr.Pos,
+		},
+	})
+}
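The next file, ingestion_rate_strategy.go, turns the per-tenant limits into pluggable rate-limiter strategies. As a quick illustration of the arithmetic the global strategy below applies, the configured tenant rate is divided evenly across healthy distributors (the function name and numbers are illustrative):

func examplePerDistributorLimit() float64 {
	const tenantLimit = 100000.0 // samples/sec configured for the tenant
	healthyDistributors := 4     // would come from ReadLifecycler.HealthyInstancesCount()
	return tenantLimit / float64(healthyDistributors) // 25000 samples/sec enforced locally
}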
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ingestion_rate_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ingestion_rate_strategy.go
new file mode 100644
index 000000000000..88b2b401981b
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ingestion_rate_strategy.go
@@ -0,0 +1,73 @@
+package distributor
+
+import (
+	"github.com/cortexproject/cortex/pkg/util/limiter"
+	"github.com/cortexproject/cortex/pkg/util/validation"
+	"golang.org/x/time/rate"
+)
+
+// ReadLifecycler represents the read interface to the lifecycler.
+type ReadLifecycler interface {
+	HealthyInstancesCount() int
+}
+
+type localStrategy struct {
+	limits *validation.Overrides
+}
+
+func newLocalIngestionRateStrategy(limits *validation.Overrides) limiter.RateLimiterStrategy {
+	return &localStrategy{
+		limits: limits,
+	}
+}
+
+func (s *localStrategy) Limit(tenantID string) float64 {
+	return s.limits.IngestionRate(tenantID)
+}
+
+func (s *localStrategy) Burst(tenantID string) int {
+	return s.limits.IngestionBurstSize(tenantID)
+}
+
+type globalStrategy struct {
+	limits *validation.Overrides
+	ring   ReadLifecycler
+}
+
+func newGlobalIngestionRateStrategy(limits *validation.Overrides, ring ReadLifecycler) limiter.RateLimiterStrategy {
+	return &globalStrategy{
+		limits: limits,
+		ring:   ring,
+	}
+}
+
+func (s *globalStrategy) Limit(tenantID string) float64 {
+	numDistributors := s.ring.HealthyInstancesCount()
+
+	if numDistributors == 0 {
+		return s.limits.IngestionRate(tenantID)
+	}
+
+	return s.limits.IngestionRate(tenantID) / float64(numDistributors)
+}
+
+func (s *globalStrategy) Burst(tenantID string) int {
+	// The meaning of burst doesn't change for the global strategy, in order
+	// to keep it easier to understand for users / operators.
+	return s.limits.IngestionBurstSize(tenantID)
+}
+
+type infiniteStrategy struct{}
+
+func newInfiniteIngestionRateStrategy() limiter.RateLimiterStrategy {
+	return &infiniteStrategy{}
+}
+
+func (s *infiniteStrategy) Limit(tenantID string) float64 {
+	return float64(rate.Inf)
+}
+
+func (s *infiniteStrategy) Burst(tenantID string) int {
+	// Burst is ignored when limit = rate.Inf.
+	return 0
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
new file mode 100644
index 000000000000..28d60b8642dc
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
@@ -0,0 +1,175 @@
+package distributor
+
+import (
+	"context"
+	"io"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	ingester_client "github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/ring"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/extract"
+	"github.com/weaveworks/common/instrument"
+	"github.com/weaveworks/common/user"
+)
+
+// Query queries multiple ingesters and returns a Matrix of samples.
+func (d *Distributor) Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) {
+	var matrix model.Matrix
+	err := instrument.CollectedRequest(ctx, "Distributor.Query", queryDuration, instrument.ErrorCode, func(ctx context.Context) error {
+		replicationSet, req, err := d.queryPrep(ctx, from, to, matchers...)
+		if err != nil {
+			return promql.ErrStorage{Err: err}
+		}
+
+		matrix, err = d.queryIngesters(ctx, replicationSet, req)
+		if err != nil {
+			return promql.ErrStorage{Err: err}
+		}
+		return nil
+	})
+	return matrix, err
+}
+
+// QueryStream queries multiple ingesters via the streaming interface and returns a big ol' set of chunks.
+func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) ([]client.TimeSeriesChunk, error) { + var result []client.TimeSeriesChunk + err := instrument.CollectedRequest(ctx, "Distributor.QueryStream", queryDuration, instrument.ErrorCode, func(ctx context.Context) error { + replicationSet, req, err := d.queryPrep(ctx, from, to, matchers...) + if err != nil { + return promql.ErrStorage{Err: err} + } + + result, err = d.queryIngesterStream(ctx, replicationSet, req) + if err != nil { + return promql.ErrStorage{Err: err} + } + return nil + }) + return result, err +} + +func (d *Distributor) queryPrep(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (ring.ReplicationSet, *client.QueryRequest, error) { + var replicationSet ring.ReplicationSet + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return replicationSet, nil, err + } + + req, err := ingester_client.ToQueryRequest(from, to, matchers) + if err != nil { + return replicationSet, nil, err + } + + // Get ingesters by metricName if one exists, otherwise get all ingesters + metricNameMatcher, _, ok := extract.MetricNameMatcherFromMatchers(matchers) + if !d.cfg.ShardByAllLabels && ok && metricNameMatcher.Type == labels.MatchEqual { + replicationSet, err = d.ingestersRing.Get(shardByMetricName(userID, metricNameMatcher.Value), ring.Read, nil) + } else { + replicationSet, err = d.ingestersRing.GetAll() + } + return replicationSet, req, err +} + +// queryIngesters queries the ingesters via the older, sample-based API. +func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.ReplicationSet, req *client.QueryRequest) (model.Matrix, error) { + // Fetch samples from multiple ingesters in parallel, using the replicationSet + // to deal with consistency. + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ing *ring.IngesterDesc) (interface{}, error) { + client, err := d.ingesterPool.GetClientFor(ing.Addr) + if err != nil { + return nil, err + } + + resp, err := client.(ingester_client.IngesterClient).Query(ctx, req) + ingesterQueries.WithLabelValues(ing.Addr).Inc() + if err != nil { + ingesterQueryFailures.WithLabelValues(ing.Addr).Inc() + return nil, err + } + + return ingester_client.FromQueryResponse(resp), nil + }) + if err != nil { + return nil, err + } + + // Merge the results into a single matrix. + fpToSampleStream := map[model.Fingerprint]*model.SampleStream{} + for _, result := range results { + for _, ss := range result.(model.Matrix) { + fp := ss.Metric.Fingerprint() + mss, ok := fpToSampleStream[fp] + if !ok { + mss = &model.SampleStream{ + Metric: ss.Metric, + } + fpToSampleStream[fp] = mss + } + mss.Values = util.MergeSampleSets(mss.Values, ss.Values) + } + } + result := model.Matrix{} + for _, ss := range fpToSampleStream { + result = append(result, ss) + } + + return result, nil +} + +// queryIngesterStream queries the ingesters using the new streaming API. 
+func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ring.ReplicationSet, req *client.QueryRequest) ([]client.TimeSeriesChunk, error) {
+	// Fetch samples from multiple ingesters.
+	results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ing *ring.IngesterDesc) (interface{}, error) {
+		client, err := d.ingesterPool.GetClientFor(ing.Addr)
+		if err != nil {
+			return nil, err
+		}
+		ingesterQueries.WithLabelValues(ing.Addr).Inc()
+
+		stream, err := client.(ingester_client.IngesterClient).QueryStream(ctx, req)
+		if err != nil {
+			ingesterQueryFailures.WithLabelValues(ing.Addr).Inc()
+			return nil, err
+		}
+		defer stream.CloseSend()
+
+		var result []*ingester_client.QueryStreamResponse
+		for {
+			series, err := stream.Recv()
+			if err == io.EOF {
+				break
+			} else if err != nil {
+				return nil, err
+			}
+			result = append(result, series)
+		}
+		return result, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	hashToSeries := map[model.Fingerprint]ingester_client.TimeSeriesChunk{}
+	for _, result := range results {
+		for _, response := range result.([]*ingester_client.QueryStreamResponse) {
+			for _, series := range response.Timeseries {
+				hash := client.FastFingerprint(series.Labels)
+				existing := hashToSeries[hash]
+				existing.Labels = series.Labels
+				existing.Chunks = append(existing.Chunks, series.Chunks...)
+				hashToSeries[hash] = existing
+			}
+		}
+	}
+	result := make([]client.TimeSeriesChunk, 0, len(hashToSeries))
+	for _, series := range hashToSeries {
+		result = append(result, series)
+	}
+
+	return result, nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go b/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go
new file mode 100644
index 000000000000..9677d1eca981
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/limiter/rate_limiter.go
@@ -0,0 +1,122 @@
+package limiter
+
+import (
+	"sync"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+// RateLimiterStrategy defines the interface which a pluggable strategy should
+// implement. The returned limit and burst can change over time, and the
+// local rate limiter will apply them every recheckPeriod.
+type RateLimiterStrategy interface {
+	Limit(tenantID string) float64
+	Burst(tenantID string) int
+}
+
+// RateLimiter is a multi-tenant local rate limiter based on golang.org/x/time/rate.
+// It takes a custom strategy as input, which is used to get the limit and burst
+// settings for each tenant.
+type RateLimiter struct {
+	strategy      RateLimiterStrategy
+	recheckPeriod time.Duration
+
+	tenantsLock sync.RWMutex
+	tenants     map[string]*tenantLimiter
+}
+
+type tenantLimiter struct {
+	limiter   *rate.Limiter
+	recheckAt time.Time
+}
+
+// NewRateLimiter makes a new multi-tenant rate limiter. Each per-tenant limiter
+// is configured using the input strategy and its limit/burst is rechecked (and
+// reconfigured if changed) every recheckPeriod.
+func NewRateLimiter(strategy RateLimiterStrategy, recheckPeriod time.Duration) *RateLimiter {
+	return &RateLimiter{
+		strategy:      strategy,
+		recheckPeriod: recheckPeriod,
+		tenants:       map[string]*tenantLimiter{},
+	}
+}
+
+// AllowN reports whether n tokens may be consumed at time now.
+func (l *RateLimiter) AllowN(now time.Time, tenantID string, n int) bool {
+	return l.getTenantLimiter(now, tenantID).AllowN(now, n)
+}
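AllowN is the only call the distributor's Push path needs; limit and burst come from the strategy and are refreshed lazily. A minimal usage sketch with a fixed strategy (fixedStrategy and example are illustrative, not part of the patch; assumes import "time"):

type fixedStrategy struct{}

func (fixedStrategy) Limit(tenantID string) float64 { return 10 } // tokens/sec
func (fixedStrategy) Burst(tenantID string) int     { return 20 }

func example() {
	l := NewRateLimiter(fixedStrategy{}, 10*time.Second)
	allowed := l.AllowN(time.Now(), "tenant-1", 5) // true while within limit/burst
	_ = allowed
}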
+// Limit returns the currently configured maximum overall token rate.
+func (l *RateLimiter) Limit(now time.Time, tenantID string) float64 {
+	return float64(l.getTenantLimiter(now, tenantID).Limit())
+}
+
+// Burst returns the currently configured maximum burst size.
+func (l *RateLimiter) Burst(now time.Time, tenantID string) int {
+	return l.getTenantLimiter(now, tenantID).Burst()
+}
+
+func (l *RateLimiter) getTenantLimiter(now time.Time, tenantID string) *rate.Limiter {
+	recheck := false
+
+	// Check if the per-tenant limiter already exists and if it should
+	// be rechecked because the recheck period has elapsed.
+	l.tenantsLock.RLock()
+	entry, ok := l.tenants[tenantID]
+	if ok && !now.Before(entry.recheckAt) {
+		recheck = true
+	}
+	l.tenantsLock.RUnlock()
+
+	// If the limiter already exists, we return it, making sure to recheck it
+	// if the recheck period has elapsed.
+	if ok && recheck {
+		return l.recheckTenantLimiter(now, tenantID)
+	} else if ok {
+		return entry.limiter
+	}
+
+	// Create a new limiter.
+	limit := rate.Limit(l.strategy.Limit(tenantID))
+	burst := l.strategy.Burst(tenantID)
+	limiter := rate.NewLimiter(limit, burst)
+
+	l.tenantsLock.Lock()
+	if entry, ok = l.tenants[tenantID]; !ok {
+		entry = &tenantLimiter{limiter, now.Add(l.recheckPeriod)}
+		l.tenants[tenantID] = entry
+	}
+	l.tenantsLock.Unlock()
+
+	return entry.limiter
+}
+
+func (l *RateLimiter) recheckTenantLimiter(now time.Time, tenantID string) *rate.Limiter {
+	limit := rate.Limit(l.strategy.Limit(tenantID))
+	burst := l.strategy.Burst(tenantID)
+
+	l.tenantsLock.Lock()
+	defer l.tenantsLock.Unlock()
+
+	entry := l.tenants[tenantID]
+
+	// We check again whether the recheck period has elapsed, because it may
+	// already have been rechecked in the meantime.
+	if now.Before(entry.recheckAt) {
+		return entry.limiter
+	}
+
+	// Ensure the limiter's limit and burst match the expected values.
+	if entry.limiter.Limit() != limit {
+		entry.limiter.SetLimitAt(now, limit)
+	}
+
+	if entry.limiter.Burst() != burst {
+		entry.limiter.SetBurstAt(now, burst)
+	}
+
+	entry.recheckAt = now.Add(l.recheckPeriod)
+
+	return entry.limiter
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/test/poll.go b/vendor/github.com/cortexproject/cortex/pkg/util/test/poll.go
new file mode 100644
index 000000000000..115b97f26360
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/test/poll.go
@@ -0,0 +1,26 @@
+package test
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+// Poll repeatedly evaluates condition until we either time out or it succeeds.
+func Poll(t *testing.T, d time.Duration, want interface{}, have func() interface{}) {
+	t.Helper()
+	deadline := time.Now().Add(d)
+	for {
+		if time.Now().After(deadline) {
+			break
+		}
+		if reflect.DeepEqual(want, have()) {
+			return
+		}
+		time.Sleep(d / 10)
+	}
+	h := have()
+	if !reflect.DeepEqual(want, h) {
+		t.Fatalf("%v != %v", want, h)
+	}
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/LICENSE b/vendor/github.com/fluent/fluent-logger-golang/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/vendor/github.com/fluent/fluent-logger-golang/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go new file mode 100644 index 000000000000..655f6233ca0a --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go @@ -0,0 +1,309 @@ +package fluent + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net" + "reflect" + "strconv" + "sync" + "time" +) + +const ( + defaultHost = "127.0.0.1" + defaultNetwork = "tcp" + defaultSocketPath = "" + defaultPort = 24224 + defaultTimeout = 3 * time.Second + defaultBufferLimit = 8 * 1024 * 1024 + defaultRetryWait = 500 + defaultMaxRetry = 13 + defaultReconnectWaitIncreRate = 1.5 +) + +type Config struct { + FluentPort int `json:"fluent_port"` + FluentHost string `json:"fluent_host"` + FluentNetwork string `json:"fluent_network"` + FluentSocketPath string `json:"fluent_socket_path"` + Timeout time.Duration `json:"timeout"` + BufferLimit int `json:"buffer_limit"` + RetryWait int `json:"retry_wait"` + MaxRetry int `json:"max_retry"` + TagPrefix string `json:"tag_prefix"` + AsyncConnect bool `json:"async_connect"` + MarshalAsJSON bool `json:"marshal_as_json"` +} + +type Fluent struct { + Config + + mubuff sync.Mutex + pending []byte + + muconn sync.Mutex + conn io.WriteCloser + reconnecting bool +} + +// New creates a new Logger. +func New(config Config) (f *Fluent, err error) { + if config.FluentNetwork == "" { + config.FluentNetwork = defaultNetwork + } + if config.FluentHost == "" { + config.FluentHost = defaultHost + } + if config.FluentPort == 0 { + config.FluentPort = defaultPort + } + if config.FluentSocketPath == "" { + config.FluentSocketPath = defaultSocketPath + } + if config.Timeout == 0 { + config.Timeout = defaultTimeout + } + if config.BufferLimit == 0 { + config.BufferLimit = defaultBufferLimit + } + if config.RetryWait == 0 { + config.RetryWait = defaultRetryWait + } + if config.MaxRetry == 0 { + config.MaxRetry = defaultMaxRetry + } + if config.AsyncConnect { + f = &Fluent{Config: config, reconnecting: true} + go f.reconnect() + } else { + f = &Fluent{Config: config, reconnecting: false} + err = f.connect() + } + return +} + +// Post writes the output for a logging event. +// +// Examples: +// +// // send string +// f.Post("tag_name", "data") +// +// // send map[string] +// mapStringData := map[string]string{ +// "foo": "bar", +// } +// f.Post("tag_name", mapStringData) +// +// // send message with specified time +// mapStringData := map[string]string{ +// "foo": "bar", +// } +// tm := time.Now() +// f.PostWithTime("tag_name", tm, mapStringData) +// +// // send struct +// structData := struct { +// Name string `msg:"name"` +// } { +// "john smith", +// } +// f.Post("tag_name", structData) +// +func (f *Fluent) Post(tag string, message interface{}) error { + timeNow := time.Now() + return f.PostWithTime(tag, timeNow, message) +} + +func (f *Fluent) PostWithTime(tag string, tm time.Time, message interface{}) error { + if len(f.TagPrefix) > 0 { + tag = f.TagPrefix + "." 
+ tag + } + + msg := reflect.ValueOf(message) + msgtype := msg.Type() + + if msgtype.Kind() == reflect.Struct { + // message should be tagged by "codec" or "msg" + kv := make(map[string]interface{}) + fields := msgtype.NumField() + for i := 0; i < fields; i++ { + field := msgtype.Field(i) + name := field.Name + if n1 := field.Tag.Get("msg"); n1 != "" { + name = n1 + } else if n2 := field.Tag.Get("codec"); n2 != "" { + name = n2 + } + kv[name] = msg.FieldByIndex(field.Index).Interface() + } + return f.EncodeAndPostData(tag, tm, kv) + } + + if msgtype.Kind() != reflect.Map { + return errors.New("fluent#PostWithTime: message must be a map") + } else if msgtype.Key().Kind() != reflect.String { + return errors.New("fluent#PostWithTime: map keys must be strings") + } + + kv := make(map[string]interface{}) + for _, k := range msg.MapKeys() { + kv[k.String()] = msg.MapIndex(k).Interface() + } + + return f.EncodeAndPostData(tag, tm, kv) +} + +func (f *Fluent) EncodeAndPostData(tag string, tm time.Time, message interface{}) error { + var data []byte + var err error + if data, err = f.EncodeData(tag, tm, message); err != nil { + return fmt.Errorf("fluent#EncodeAndPostData: can't convert '%#v' to msgpack:%v", message, err) + } + return f.postRawData(data) +} + +// Deprecated: Use EncodeAndPostData instead +func (f *Fluent) PostRawData(data []byte) { + f.postRawData(data) +} + +func (f *Fluent) postRawData(data []byte) error { + if err := f.appendBuffer(data); err != nil { + return err + } + if err := f.send(); err != nil { + f.close() + return err + } + return nil +} + +// For sending forward protocol adopted JSON +type MessageChunk struct { + message Message +} + +// Golang default marshaler does not support +// ["value", "value2", {"key":"value"}] style marshaling. +// So, it should write JSON marshaler by hand. +func (chunk *MessageChunk) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(chunk.message.Record) + return []byte(fmt.Sprintf("[\"%s\",%d,%s,null]", chunk.message.Tag, + chunk.message.Time, data)), err +} + +func (f *Fluent) EncodeData(tag string, tm time.Time, message interface{}) (data []byte, err error) { + timeUnix := tm.Unix() + if f.Config.MarshalAsJSON { + msg := Message{Tag: tag, Time: timeUnix, Record: message} + chunk := &MessageChunk{message: msg} + data, err = json.Marshal(chunk) + } else { + msg := &Message{Tag: tag, Time: timeUnix, Record: message} + data, err = msg.MarshalMsg(nil) + } + return +} + +// Close closes the connection. +func (f *Fluent) Close() (err error) { + if len(f.pending) > 0 { + err = f.send() + } + f.close() + return +} + +// appendBuffer appends data to buffer with lock. +func (f *Fluent) appendBuffer(data []byte) error { + f.mubuff.Lock() + defer f.mubuff.Unlock() + if len(f.pending)+len(data) > f.Config.BufferLimit { + return errors.New(fmt.Sprintf("fluent#appendBuffer: Buffer full, limit %v", f.Config.BufferLimit)) + } + f.pending = append(f.pending, data...) + return nil +} + +// close closes the connection. +func (f *Fluent) close() { + f.muconn.Lock() + if f.conn != nil { + f.conn.Close() + f.conn = nil + } + f.muconn.Unlock() +} + +// connect establishes a new connection using the specified transport. 
+func (f *Fluent) connect() (err error) { + f.muconn.Lock() + defer f.muconn.Unlock() + + switch f.Config.FluentNetwork { + case "tcp": + f.conn, err = net.DialTimeout(f.Config.FluentNetwork, f.Config.FluentHost+":"+strconv.Itoa(f.Config.FluentPort), f.Config.Timeout) + case "unix": + f.conn, err = net.DialTimeout(f.Config.FluentNetwork, f.Config.FluentSocketPath, f.Config.Timeout) + default: + err = net.UnknownNetworkError(f.Config.FluentNetwork) + } + + if err == nil { + f.reconnecting = false + } + return +} + +func e(x, y float64) int { + return int(math.Pow(x, y)) +} + +func (f *Fluent) reconnect() { + for i := 0; ; i++ { + err := f.connect() + if err == nil { + f.send() + return + } + if i == f.Config.MaxRetry { + // TODO: What we can do when connection failed MaxRetry times? + panic("fluent#reconnect: failed to reconnect!") + } + waitTime := f.Config.RetryWait * e(defaultReconnectWaitIncreRate, float64(i-1)) + time.Sleep(time.Duration(waitTime) * time.Millisecond) + } +} + +func (f *Fluent) send() error { + f.muconn.Lock() + defer f.muconn.Unlock() + + if f.conn == nil { + if f.reconnecting == false { + f.reconnecting = true + go f.reconnect() + } + return errors.New("fluent#send: can't send logs, client is reconnecting") + } + + f.mubuff.Lock() + defer f.mubuff.Unlock() + + var err error + if len(f.pending) > 0 { + _, err = f.conn.Write(f.pending) + if err != nil { + f.conn.Close() + f.conn = nil + } else { + f.pending = f.pending[:0] + } + } + return err +} diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go new file mode 100644 index 000000000000..268d614dfd67 --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go @@ -0,0 +1,24 @@ +//go:generate msgp + +package fluent + +//msgp:tuple Entry +type Entry struct { + Time int64 `msg:"time"` + Record interface{} `msg:"record"` +} + +//msgp:tuple Forward +type Forward struct { + Tag string `msg:"tag"` + Entries []Entry `msg:"entries"` + Option interface{} `msg:"option"` +} + +//msgp:tuple Message +type Message struct { + Tag string `msg:"tag"` + Time int64 `msg:"time"` + Record interface{} `msg:"record"` + Option interface{} `msg:"option"` +} diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go new file mode 100644 index 000000000000..afb9d6d31f02 --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go @@ -0,0 +1,372 @@ +package fluent + +// NOTE: THIS FILE WAS PRODUCED BY THE +// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) +// DO NOT EDIT + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *Entry) DecodeMsg(dc *msgp.Reader) (err error) { + var ssz uint32 + ssz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if ssz != 2 { + err = msgp.ArrayError{Wanted: 2, Got: ssz} + return + } + z.Time, err = dc.ReadInt64() + if err != nil { + return + } + z.Record, err = dc.ReadIntf() + if err != nil { + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z Entry) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(2) + if err != nil { + return + } + err = en.WriteInt64(z.Time) + if err != nil { + return + } + err = en.WriteIntf(z.Record) + if err != nil { + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z Entry) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, 
z.Msgsize()) + o = msgp.AppendArrayHeader(o, 2) + o = msgp.AppendInt64(o, z.Time) + o, err = msgp.AppendIntf(o, z.Record) + if err != nil { + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Entry) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var ssz uint32 + ssz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if ssz != 2 { + err = msgp.ArrayError{Wanted: 2, Got: ssz} + return + } + } + z.Time, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return + } + z.Record, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + o = bts + return +} + +func (z Entry) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Record) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Forward) DecodeMsg(dc *msgp.Reader) (err error) { + var ssz uint32 + ssz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if ssz != 3 { + err = msgp.ArrayError{Wanted: 3, Got: ssz} + return + } + z.Tag, err = dc.ReadString() + if err != nil { + return + } + var xsz uint32 + xsz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if cap(z.Entries) >= int(xsz) { + z.Entries = z.Entries[:xsz] + } else { + z.Entries = make([]Entry, xsz) + } + for xvk := range z.Entries { + var ssz uint32 + ssz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if ssz != 2 { + err = msgp.ArrayError{Wanted: 2, Got: ssz} + return + } + z.Entries[xvk].Time, err = dc.ReadInt64() + if err != nil { + return + } + z.Entries[xvk].Record, err = dc.ReadIntf() + if err != nil { + return + } + } + z.Option, err = dc.ReadIntf() + if err != nil { + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Forward) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(3) + if err != nil { + return + } + err = en.WriteString(z.Tag) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Entries))) + if err != nil { + return + } + for xvk := range z.Entries { + err = en.WriteArrayHeader(2) + if err != nil { + return + } + err = en.WriteInt64(z.Entries[xvk].Time) + if err != nil { + return + } + err = en.WriteIntf(z.Entries[xvk].Record) + if err != nil { + return + } + } + err = en.WriteIntf(z.Option) + if err != nil { + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Forward) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, 3) + o = msgp.AppendString(o, z.Tag) + o = msgp.AppendArrayHeader(o, uint32(len(z.Entries))) + for xvk := range z.Entries { + o = msgp.AppendArrayHeader(o, 2) + o = msgp.AppendInt64(o, z.Entries[xvk].Time) + o, err = msgp.AppendIntf(o, z.Entries[xvk].Record) + if err != nil { + return + } + } + o, err = msgp.AppendIntf(o, z.Option) + if err != nil { + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Forward) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var ssz uint32 + ssz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if ssz != 3 { + err = msgp.ArrayError{Wanted: 3, Got: ssz} + return + } + } + z.Tag, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + return + } + var xsz uint32 + xsz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if cap(z.Entries) >= int(xsz) { + z.Entries = z.Entries[:xsz] + } else { + z.Entries = make([]Entry, xsz) + } + for xvk := range z.Entries { + { + var ssz uint32 + ssz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err 
!= nil { + return + } + if ssz != 2 { + err = msgp.ArrayError{Wanted: 2, Got: ssz} + return + } + } + z.Entries[xvk].Time, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return + } + z.Entries[xvk].Record, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + } + z.Option, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + o = bts + return +} + +func (z *Forward) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.ArrayHeaderSize + for xvk := range z.Entries { + s += msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Entries[xvk].Record) + } + s += msgp.GuessSize(z.Option) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Message) DecodeMsg(dc *msgp.Reader) (err error) { + var ssz uint32 + ssz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if ssz != 4 { + err = msgp.ArrayError{Wanted: 4, Got: ssz} + return + } + z.Tag, err = dc.ReadString() + if err != nil { + return + } + z.Time, err = dc.ReadInt64() + if err != nil { + return + } + z.Record, err = dc.ReadIntf() + if err != nil { + return + } + z.Option, err = dc.ReadIntf() + if err != nil { + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Message) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(4) + if err != nil { + return + } + err = en.WriteString(z.Tag) + if err != nil { + return + } + err = en.WriteInt64(z.Time) + if err != nil { + return + } + err = en.WriteIntf(z.Record) + if err != nil { + return + } + err = en.WriteIntf(z.Option) + if err != nil { + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Message) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, 4) + o = msgp.AppendString(o, z.Tag) + o = msgp.AppendInt64(o, z.Time) + o, err = msgp.AppendIntf(o, z.Record) + if err != nil { + return + } + o, err = msgp.AppendIntf(o, z.Option) + if err != nil { + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Message) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var ssz uint32 + ssz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if ssz != 4 { + err = msgp.ArrayError{Wanted: 4, Got: ssz} + return + } + } + z.Tag, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + return + } + z.Time, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return + } + z.Record, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + z.Option, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + o = bts + return +} + +func (z *Message) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.Int64Size + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option) + return +} diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go new file mode 100644 index 000000000000..8904726ddbf7 --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go @@ -0,0 +1,3 @@ +package fluent + +const Version = "1.2.1" diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md new file mode 100644 index 000000000000..1ac6a81f6aed --- /dev/null +++ b/vendor/github.com/philhofer/fwd/LICENSE.md @@ -0,0 +1,7 @@ +Copyright (c) 2014-2015, Philip Hofer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and 
associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md new file mode 100644 index 000000000000..38349af34d3b --- /dev/null +++ b/vendor/github.com/philhofer/fwd/README.md @@ -0,0 +1,315 @@ + +# fwd + import "github.com/philhofer/fwd" + +The `fwd` package provides a buffered reader +and writer. Each has methods that help improve +the encoding/decoding performance of some binary +protocols. + +The `fwd.Writer` and `fwd.Reader` type provide similar +functionality to their counterparts in `bufio`, plus +a few extra utility methods that simplify read-ahead +and write-ahead. I wrote this package to improve serialization +performance for http://github.com/tinylib/msgp, +where it provided about a 2x speedup over `bufio` for certain +workloads. However, care must be taken to understand the semantics of the +extra methods provided by this package, as they allow +the user to access and manipulate the buffer memory +directly. + +The extra methods for `fwd.Reader` are `Peek`, `Skip` +and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +will re-allocate the read buffer in order to accommodate arbitrarily +large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +in the stream, and uses the `io.Seeker` interface if the underlying +stream implements it. `(*fwd.Reader).Next` returns a slice pointing +to the next `n` bytes in the read buffer (like `Peek`), but also +increments the read position. This allows users to process streams +in arbitrary block sizes without having to manage appropriately-sized +slices. Additionally, obviating the need to copy the data from the +buffer to another location in memory can improve performance dramatically +in CPU-bound applications. + +`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +returns a slice pointing to the next `n` bytes of the writer, and increments +the write position by the length of the returned slice. This allows users +to write directly to the end of the buffer. + + + + +## Constants +``` go +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 +) +``` +``` go +const ( + // DefaultWriterSize is the + // default write buffer size. 
+ DefaultWriterSize = 2048 +) +``` + + + +## type Reader +``` go +type Reader struct { + // contains filtered or unexported fields +} +``` +Reader is a buffered look-ahead reader + + + + + + + + + +### func NewReader +``` go +func NewReader(r io.Reader) *Reader +``` +NewReader returns a new *Reader that reads from 'r' + + +### func NewReaderSize +``` go +func NewReaderSize(r io.Reader, n int) *Reader +``` +NewReaderSize returns a new *Reader that +reads from 'r' and has a buffer size 'n' + + + + +### func (\*Reader) BufferSize +``` go +func (r *Reader) BufferSize() int +``` +BufferSize returns the total size of the buffer + + + +### func (\*Reader) Buffered +``` go +func (r *Reader) Buffered() int +``` +Buffered returns the number of bytes currently in the buffer + + + +### func (\*Reader) Next +``` go +func (r *Reader) Next(n int) ([]byte, error) +``` +Next returns the next 'n' bytes in the stream. +Unlike Peek, Next advances the reader position. +The returned bytes point to the same +data as the buffer, so the slice is +only valid until the next reader method call. +An EOF is considered an unexpected error. +If an the returned slice is less than the +length asked for, an error will be returned, +and the reader position will not be incremented. + + + +### func (\*Reader) Peek +``` go +func (r *Reader) Peek(n int) ([]byte, error) +``` +Peek returns the next 'n' buffered bytes, +reading from the underlying reader if necessary. +It will only return a slice shorter than 'n' bytes +if it also returns an error. Peek does not advance +the reader. EOF errors are *not* returned as +io.ErrUnexpectedEOF. + + + +### func (\*Reader) Read +``` go +func (r *Reader) Read(b []byte) (int, error) +``` +Read implements `io.Reader` + + + +### func (\*Reader) ReadByte +``` go +func (r *Reader) ReadByte() (byte, error) +``` +ReadByte implements `io.ByteReader` + + + +### func (\*Reader) ReadFull +``` go +func (r *Reader) ReadFull(b []byte) (int, error) +``` +ReadFull attempts to read len(b) bytes into +'b'. It returns the number of bytes read into +'b', and an error if it does not return len(b). +EOF is considered an unexpected error. + + + +### func (\*Reader) Reset +``` go +func (r *Reader) Reset(rd io.Reader) +``` +Reset resets the underlying reader +and the read buffer. + + + +### func (\*Reader) Skip +``` go +func (r *Reader) Skip(n int) (int, error) +``` +Skip moves the reader forward 'n' bytes. +Returns the number of bytes skipped and any +errors encountered. It is analogous to Seek(n, 1). +If the underlying reader implements io.Seeker, then +that method will be used to skip forward. + +If the reader encounters +an EOF before skipping 'n' bytes, it +returns io.ErrUnexpectedEOF. If the +underlying reader implements io.Seeker, then +those rules apply instead. (Many implementations +will not return `io.EOF` until the next call +to Read.) + + + +### func (\*Reader) WriteTo +``` go +func (r *Reader) WriteTo(w io.Writer) (int64, error) +``` +WriteTo implements `io.WriterTo` + + + +## type Writer +``` go +type Writer struct { + // contains filtered or unexported fields +} +``` +Writer is a buffered writer + + + + + + + + + +### func NewWriter +``` go +func NewWriter(w io.Writer) *Writer +``` +NewWriter returns a new writer +that writes to 'w' and has a buffer +that is `DefaultWriterSize` bytes. + + +### func NewWriterSize +``` go +func NewWriterSize(w io.Writer, size int) *Writer +``` +NewWriterSize returns a new writer +that writes to 'w' and has a buffer +that is 'size' bytes. 
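As a concrete illustration of the read-ahead and write-ahead semantics documented in this README, a minimal sketch using only the constructors and methods listed here (error returns are elided for brevity and should be checked in real code):

``` go
package main

import (
	"bytes"
	"fmt"

	"github.com/philhofer/fwd"
)

func main() {
	r := fwd.NewReader(bytes.NewReader([]byte("hello, world")))

	// Peek does not advance the reader...
	head, _ := r.Peek(5)
	fmt.Printf("peeked %q\n", head) // "hello"

	// ...while Next returns the same bytes and advances past them.
	// The returned slice aliases the internal buffer, so it is only
	// valid until the next method call on the reader.
	head, _ = r.Next(5)
	fmt.Printf("consumed %q\n", head) // "hello"

	var out bytes.Buffer
	w := fwd.NewWriter(&out)

	// Writer.Next reserves space at the end of the write buffer so a
	// caller can fill it in place instead of copying through Write.
	buf, _ := w.Next(2)
	copy(buf, ", ")
	w.WriteString("again")
	w.Flush()
	fmt.Println(out.String()) // ", again"
}
```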
+ + + + +### func (\*Writer) BufferSize +``` go +func (w *Writer) BufferSize() int +``` +BufferSize returns the maximum size of the buffer. + + + +### func (\*Writer) Buffered +``` go +func (w *Writer) Buffered() int +``` +Buffered returns the number of buffered bytes +in the reader. + + + +### func (\*Writer) Flush +``` go +func (w *Writer) Flush() error +``` +Flush flushes any buffered bytes +to the underlying writer. + + + +### func (\*Writer) Next +``` go +func (w *Writer) Next(n int) ([]byte, error) +``` +Next returns the next 'n' free bytes +in the write buffer, flushing the writer +as necessary. Next will return `io.ErrShortBuffer` +if 'n' is greater than the size of the write buffer. +Calls to 'next' increment the write position by +the size of the returned buffer. + + + +### func (\*Writer) ReadFrom +``` go +func (w *Writer) ReadFrom(r io.Reader) (int64, error) +``` +ReadFrom implements `io.ReaderFrom` + + + +### func (\*Writer) Write +``` go +func (w *Writer) Write(p []byte) (int, error) +``` +Write implements `io.Writer` + + + +### func (\*Writer) WriteByte +``` go +func (w *Writer) WriteByte(b byte) error +``` +WriteByte implements `io.ByteWriter` + + + +### func (\*Writer) WriteString +``` go +func (w *Writer) WriteString(s string) (int, error) +``` +WriteString is analogous to Write, but it takes a string. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go new file mode 100644 index 000000000000..e5919103063e --- /dev/null +++ b/vendor/github.com/philhofer/fwd/reader.go @@ -0,0 +1,379 @@ +// The `fwd` package provides a buffered reader +// and writer. Each has methods that help improve +// the encoding/decoding performance of some binary +// protocols. +// +// The `fwd.Writer` and `fwd.Reader` type provide similar +// functionality to their counterparts in `bufio`, plus +// a few extra utility methods that simplify read-ahead +// and write-ahead. I wrote this package to improve serialization +// performance for http://github.com/tinylib/msgp, +// where it provided about a 2x speedup over `bufio` for certain +// workloads. However, care must be taken to understand the semantics of the +// extra methods provided by this package, as they allow +// the user to access and manipulate the buffer memory +// directly. +// +// The extra methods for `fwd.Reader` are `Peek`, `Skip` +// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +// will re-allocate the read buffer in order to accommodate arbitrarily +// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +// in the stream, and uses the `io.Seeker` interface if the underlying +// stream implements it. `(*fwd.Reader).Next` returns a slice pointing +// to the next `n` bytes in the read buffer (like `Peek`), but also +// increments the read position. This allows users to process streams +// in arbitrary block sizes without having to manage appropriately-sized +// slices. Additionally, obviating the need to copy the data from the +// buffer to another location in memory can improve performance dramatically +// in CPU-bound applications. +// +// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +// returns a slice pointing to the next `n` bytes of the writer, and increments +// the write position by the length of the returned slice. This allows users +// to write directly to the end of the buffer. 
+// +package fwd + +import "io" + +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 + + // minimum read buffer; straight from bufio + minReaderSize = 16 +) + +// NewReader returns a new *Reader that reads from 'r' +func NewReader(r io.Reader) *Reader { + return NewReaderSize(r, DefaultReaderSize) +} + +// NewReaderSize returns a new *Reader that +// reads from 'r' and has a buffer size 'n' +func NewReaderSize(r io.Reader, n int) *Reader { + rd := &Reader{ + r: r, + data: make([]byte, 0, max(minReaderSize, n)), + } + if s, ok := r.(io.Seeker); ok { + rd.rs = s + } + return rd +} + +// Reader is a buffered look-ahead reader +type Reader struct { + r io.Reader // underlying reader + + // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space + data []byte // data + n int // read offset + state error // last read error + + // if the reader past to NewReader was + // also an io.Seeker, this is non-nil + rs io.Seeker +} + +// Reset resets the underlying reader +// and the read buffer. +func (r *Reader) Reset(rd io.Reader) { + r.r = rd + r.data = r.data[0:0] + r.n = 0 + r.state = nil + if s, ok := rd.(io.Seeker); ok { + r.rs = s + } else { + r.rs = nil + } +} + +// more() does one read on the underlying reader +func (r *Reader) more() { + // move data backwards so that + // the read offset is 0; this way + // we can supply the maximum number of + // bytes to the reader + if r.n != 0 { + if r.n < len(r.data) { + r.data = r.data[:copy(r.data[0:], r.data[r.n:])] + } else { + r.data = r.data[:0] + } + r.n = 0 + } + var a int + a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)]) + if a == 0 && r.state == nil { + r.state = io.ErrNoProgress + return + } + r.data = r.data[:len(r.data)+a] +} + +// pop error +func (r *Reader) err() (e error) { + e, r.state = r.state, nil + return +} + +// pop error; EOF -> io.ErrUnexpectedEOF +func (r *Reader) noEOF() (e error) { + e, r.state = r.state, nil + if e == io.EOF { + e = io.ErrUnexpectedEOF + } + return +} + +// buffered bytes +func (r *Reader) buffered() int { return len(r.data) - r.n } + +// Buffered returns the number of bytes currently in the buffer +func (r *Reader) Buffered() int { return len(r.data) - r.n } + +// BufferSize returns the total size of the buffer +func (r *Reader) BufferSize() int { return cap(r.data) } + +// Peek returns the next 'n' buffered bytes, +// reading from the underlying reader if necessary. +// It will only return a slice shorter than 'n' bytes +// if it also returns an error. Peek does not advance +// the reader. EOF errors are *not* returned as +// io.ErrUnexpectedEOF. +func (r *Reader) Peek(n int) ([]byte, error) { + // in the degenerate case, + // we may need to realloc + // (the caller asked for more + // bytes than the size of the buffer) + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // keep filling until + // we hit an error or + // read enough bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + // we must have hit an error + if r.buffered() < n { + return r.data[r.n:], r.err() + } + + return r.data[r.n : r.n+n], nil +} + +// Skip moves the reader forward 'n' bytes. +// Returns the number of bytes skipped and any +// errors encountered. It is analogous to Seek(n, 1). +// If the underlying reader implements io.Seeker, then +// that method will be used to skip forward. 
+// +// If the reader encounters +// an EOF before skipping 'n' bytes, it +// returns io.ErrUnexpectedEOF. If the +// underlying reader implements io.Seeker, then +// those rules apply instead. (Many implementations +// will not return `io.EOF` until the next call +// to Read.) +func (r *Reader) Skip(n int) (int, error) { + + // fast path + if r.buffered() >= n { + r.n += n + return n, nil + } + + // use seeker implementation + // if we can + if r.rs != nil { + return r.skipSeek(n) + } + + // loop on filling + // and then erasing + o := n + for r.buffered() < n && r.state == nil { + r.more() + // we can skip forward + // up to r.buffered() bytes + step := min(r.buffered(), n) + r.n += step + n -= step + } + // at this point, n should be + // 0 if everything went smoothly + return o - n, r.noEOF() +} + +// Next returns the next 'n' bytes in the stream. +// Unlike Peek, Next advances the reader position. +// The returned bytes point to the same +// data as the buffer, so the slice is +// only valid until the next reader method call. +// An EOF is considered an unexpected error. +// If an the returned slice is less than the +// length asked for, an error will be returned, +// and the reader position will not be incremented. +func (r *Reader) Next(n int) ([]byte, error) { + + // in case the buffer is too small + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // fill at least 'n' bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + if r.buffered() < n { + return r.data[r.n:], r.noEOF() + } + out := r.data[r.n : r.n+n] + r.n += n + return out, nil +} + +// skipSeek uses the io.Seeker to seek forward. +// only call this function when n > r.buffered() +func (r *Reader) skipSeek(n int) (int, error) { + o := r.buffered() + // first, clear buffer + n -= o + r.n = 0 + r.data = r.data[:0] + + // then seek forward remaning bytes + i, err := r.rs.Seek(int64(n), 1) + return int(i) + o, err +} + +// Read implements `io.Reader` +func (r *Reader) Read(b []byte) (int, error) { + // if we have data in the buffer, just + // return that. + if r.buffered() != 0 { + x := copy(b, r.data[r.n:]) + r.n += x + return x, nil + } + var n int + // we have no buffered data; determine + // whether or not to buffer or call + // the underlying reader directly + if len(b) >= cap(r.data) { + n, r.state = r.r.Read(b) + } else { + r.more() + n = copy(b, r.data) + r.n = n + } + if n == 0 { + return 0, r.err() + } + return n, nil +} + +// ReadFull attempts to read len(b) bytes into +// 'b'. It returns the number of bytes read into +// 'b', and an error if it does not return len(b). +// EOF is considered an unexpected error. +func (r *Reader) ReadFull(b []byte) (int, error) { + var n int // read into b + var nn int // scratch + l := len(b) + // either read buffered data, + // or read directly for the underlying + // buffer, or fetch more buffered data. 
+ for n < l && r.state == nil { + if r.buffered() != 0 { + nn = copy(b[n:], r.data[r.n:]) + n += nn + r.n += nn + } else if l-n > cap(r.data) { + nn, r.state = r.r.Read(b[n:]) + n += nn + } else { + r.more() + } + } + if n < l { + return n, r.noEOF() + } + return n, nil +} + +// ReadByte implements `io.ByteReader` +func (r *Reader) ReadByte() (byte, error) { + for r.buffered() < 1 && r.state == nil { + r.more() + } + if r.buffered() < 1 { + return 0, r.err() + } + b := r.data[r.n] + r.n++ + return b, nil +} + +// WriteTo implements `io.WriterTo` +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + var ( + i int64 + ii int + err error + ) + // first, clear buffer + if r.buffered() > 0 { + ii, err = w.Write(r.data[r.n:]) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + for r.state == nil { + // here we just do + // 1:1 reads and writes + r.more() + if r.buffered() > 0 { + ii, err = w.Write(r.data) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + } + if r.state != io.EOF { + return i, r.err() + } + return i, nil +} + +func min(a int, b int) int { + if a < b { + return a + } + return b +} + +func max(a int, b int) int { + if a < b { + return b + } + return a +} diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go new file mode 100644 index 000000000000..2dc392a91bd6 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer.go @@ -0,0 +1,224 @@ +package fwd + +import "io" + +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 + + minWriterSize = minReaderSize +) + +// Writer is a buffered writer +type Writer struct { + w io.Writer // writer + buf []byte // 0:len(buf) is bufered data +} + +// NewWriter returns a new writer +// that writes to 'w' and has a buffer +// that is `DefaultWriterSize` bytes. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, DefaultWriterSize), + } +} + +// NewWriterSize returns a new writer +// that writes to 'w' and has a buffer +// that is 'size' bytes. +func NewWriterSize(w io.Writer, size int) *Writer { + if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, max(size, minWriterSize)), + } +} + +// Buffered returns the number of buffered bytes +// in the reader. +func (w *Writer) Buffered() int { return len(w.buf) } + +// BufferSize returns the maximum size of the buffer. +func (w *Writer) BufferSize() int { return cap(w.buf) } + +// Flush flushes any buffered bytes +// to the underlying writer. +func (w *Writer) Flush() error { + l := len(w.buf) + if l > 0 { + n, err := w.w.Write(w.buf) + + // if we didn't write the whole + // thing, copy the unwritten + // bytes to the beginnning of the + // buffer. 
+ if n < l && n > 0 { + w.pushback(n) + if err == nil { + err = io.ErrShortWrite + } + } + if err != nil { + return err + } + w.buf = w.buf[:0] + return nil + } + return nil +} + +// Write implements `io.Writer` +func (w *Writer) Write(p []byte) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(p) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + if c < ln { + return w.w.Write(p) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], p), nil +} + +// WriteString is analogous to Write, but it takes a string. +func (w *Writer) WriteString(s string) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(s) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + // + // yes, this is unsafe. *but* + // io.Writer is not allowed + // to mutate its input or + // maintain a reference to it, + // per the spec in package io. + // + // plus, if the string is really + // too big to fit in the buffer, then + // creating a copy to write it is + // expensive (and, strictly speaking, + // unnecessary) + if c < ln { + return w.w.Write(unsafestr(s)) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], s), nil +} + +// WriteByte implements `io.ByteWriter` +func (w *Writer) WriteByte(b byte) error { + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + w.buf = append(w.buf, b) + return nil +} + +// Next returns the next 'n' free bytes +// in the write buffer, flushing the writer +// as necessary. Next will return `io.ErrShortBuffer` +// if 'n' is greater than the size of the write buffer. +// Calls to 'next' increment the write position by +// the size of the returned buffer. +func (w *Writer) Next(n int) ([]byte, error) { + c, l := cap(w.buf), len(w.buf) + if n > c { + return nil, io.ErrShortBuffer + } + avail := c - l + if avail < n { + if err := w.Flush(); err != nil { + return nil, err + } + l = len(w.buf) + } + w.buf = w.buf[:l+n] + return w.buf[l:], nil +} + +// take the bytes from w.buf[n:len(w.buf)] +// and put them at the beginning of w.buf, +// and resize to the length of the copied segment. +func (w *Writer) pushback(n int) { + w.buf = w.buf[:copy(w.buf, w.buf[n:])] +} + +// ReadFrom implements `io.ReaderFrom` +func (w *Writer) ReadFrom(r io.Reader) (int64, error) { + // anticipatory flush + if err := w.Flush(); err != nil { + return 0, err + } + + w.buf = w.buf[0:cap(w.buf)] // expand buffer + + var nn int64 // written + var err error // error + var x int // read + + // 1:1 reads and writes + for err == nil { + x, err = r.Read(w.buf) + if x > 0 { + n, werr := w.w.Write(w.buf[:x]) + nn += int64(n) + + if err != nil { + if n < x && n > 0 { + w.pushback(n - x) + } + return nn, werr + } + if n < x { + w.pushback(n - x) + return nn, io.ErrShortWrite + } + } else if err == nil { + err = io.ErrNoProgress + break + } + } + if err != io.EOF { + return nn, err + } + + // we only clear here + // because we are sure + // the writes have + // succeeded. otherwise, + // we retain the data in case + // future writes succeed. 
+ w.buf = w.buf[0:0] + + return nn, nil +} diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go new file mode 100644 index 000000000000..e367f39317a0 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_appengine.go @@ -0,0 +1,5 @@ +// +build appengine + +package fwd + +func unsafestr(s string) []byte { return []byte(s) } diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go new file mode 100644 index 000000000000..a0bf453b394c --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_unsafe.go @@ -0,0 +1,18 @@ +// +build !appengine + +package fwd + +import ( + "reflect" + "unsafe" +) + +// unsafe cast string as []byte +func unsafestr(b string) []byte { + l := len(b) + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: l, + Cap: l, + Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data, + })) +} diff --git a/vendor/github.com/tinylib/msgp/LICENSE b/vendor/github.com/tinylib/msgp/LICENSE new file mode 100644 index 000000000000..14d60424e88f --- /dev/null +++ b/vendor/github.com/tinylib/msgp/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2014 Philip Hofer +Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go new file mode 100644 index 000000000000..6c6bb37a5f43 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go @@ -0,0 +1,24 @@ +// +build linux,!appengine + +package msgp + +import ( + "os" + "syscall" +) + +func adviseRead(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) +} + +func adviseWrite(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL) +} + +func fallocate(f *os.File, sz int64) error { + err := syscall.Fallocate(int(f.Fd()), 0, 0, sz) + if err == syscall.ENOTSUP { + return f.Truncate(sz) + } + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go new file mode 100644 index 000000000000..da65ea541268 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go @@ -0,0 +1,17 @@ +// +build !linux appengine + +package msgp + +import ( + "os" +) + +// TODO: darwin, BSD support + +func adviseRead(mem []byte) {} + +func adviseWrite(mem []byte) {} + +func fallocate(f *os.File, sz int64) error { + return f.Truncate(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/appengine.go b/vendor/github.com/tinylib/msgp/msgp/appengine.go new file mode 100644 index 000000000000..bff9e768ab9f --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/appengine.go @@ -0,0 +1,15 @@ +// +build appengine + +package msgp + +// let's just assume appengine +// uses 64-bit hardware... +const smallint = false + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go new file mode 100644 index 000000000000..a0434c7ea1be --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/circular.go @@ -0,0 +1,39 @@ +package msgp + +type timer interface { + StartTimer() + StopTimer() +} + +// EndlessReader is an io.Reader +// that loops over the same data +// endlessly. It is used for benchmarking. +type EndlessReader struct { + tb timer + data []byte + offset int +} + +// NewEndlessReader returns a new endless reader +func NewEndlessReader(b []byte, tb timer) *EndlessReader { + return &EndlessReader{tb: tb, data: b, offset: 0} +} + +// Read implements io.Reader. In practice, it +// always returns (len(p), nil), although it +// fills the supplied slice while the benchmark +// timer is stopped. +func (c *EndlessReader) Read(p []byte) (int, error) { + c.tb.StopTimer() + var n int + l := len(p) + m := len(c.data) + for n < l { + nn := copy(p[n:], c.data[c.offset:]) + n += nn + c.offset += nn + c.offset %= m + } + c.tb.StartTimer() + return n, nil +} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go new file mode 100644 index 000000000000..c634eef1dfbd --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/defs.go @@ -0,0 +1,142 @@ +// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp). +// +// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack +// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code +// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces. 
+// +// This package defines four "families" of functions: +// - AppendXxxx() appends an object to a []byte in MessagePack encoding. +// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes. +// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type. +// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type. +// +// Once a type has satisfied the `Encodable` and `Decodable` interfaces, +// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using +// msgp.Encode(io.Writer, msgp.Encodable) +// and +// msgp.Decode(io.Reader, msgp.Decodable) +// +// There are also methods for converting MessagePack to JSON without +// an explicit de-serialization step. +// +// For additional tips, tricks, and gotchas, please visit +// the wiki at http://github.com/tinylib/msgp +package msgp + +const last4 = 0x0f +const first4 = 0xf0 +const last5 = 0x1f +const first3 = 0xe0 +const last7 = 0x7f + +func isfixint(b byte) bool { + return b>>7 == 0 +} + +func isnfixint(b byte) bool { + return b&first3 == mnfixint +} + +func isfixmap(b byte) bool { + return b&first4 == mfixmap +} + +func isfixarray(b byte) bool { + return b&first4 == mfixarray +} + +func isfixstr(b byte) bool { + return b&first3 == mfixstr +} + +func wfixint(u uint8) byte { + return u & last7 +} + +func rfixint(b byte) uint8 { + return b +} + +func wnfixint(i int8) byte { + return byte(i) | mnfixint +} + +func rnfixint(b byte) int8 { + return int8(b) +} + +func rfixmap(b byte) uint8 { + return b & last4 +} + +func wfixmap(u uint8) byte { + return mfixmap | (u & last4) +} + +func rfixstr(b byte) uint8 { + return b & last5 +} + +func wfixstr(u uint8) byte { + return (u & last5) | mfixstr +} + +func rfixarray(b byte) uint8 { + return (b & last4) +} + +func wfixarray(u uint8) byte { + return (u & last4) | mfixarray +} + +// These are all the byte +// prefixes defined by the +// msgpack standard +const ( + // 0XXXXXXX + mfixint uint8 = 0x00 + + // 111XXXXX + mnfixint uint8 = 0xe0 + + // 1000XXXX + mfixmap uint8 = 0x80 + + // 1001XXXX + mfixarray uint8 = 0x90 + + // 101XXXXX + mfixstr uint8 = 0xa0 + + mnil uint8 = 0xc0 + mfalse uint8 = 0xc2 + mtrue uint8 = 0xc3 + mbin8 uint8 = 0xc4 + mbin16 uint8 = 0xc5 + mbin32 uint8 = 0xc6 + mext8 uint8 = 0xc7 + mext16 uint8 = 0xc8 + mext32 uint8 = 0xc9 + mfloat32 uint8 = 0xca + mfloat64 uint8 = 0xcb + muint8 uint8 = 0xcc + muint16 uint8 = 0xcd + muint32 uint8 = 0xce + muint64 uint8 = 0xcf + mint8 uint8 = 0xd0 + mint16 uint8 = 0xd1 + mint32 uint8 = 0xd2 + mint64 uint8 = 0xd3 + mfixext1 uint8 = 0xd4 + mfixext2 uint8 = 0xd5 + mfixext4 uint8 = 0xd6 + mfixext8 uint8 = 0xd7 + mfixext16 uint8 = 0xd8 + mstr8 uint8 = 0xd9 + mstr16 uint8 = 0xda + mstr32 uint8 = 0xdb + marray16 uint8 = 0xdc + marray32 uint8 = 0xdd + mmap16 uint8 = 0xde + mmap32 uint8 = 0xdf +) diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go new file mode 100644 index 000000000000..41f929864642 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/edit.go @@ -0,0 +1,241 @@ +package msgp + +import ( + "math" +) + +// Locate returns a []byte pointing to the field +// in a messagepack map with the provided key. (The returned []byte +// points to a sub-slice of 'raw'; Locate does no allocations.) If the +// key doesn't exist in the map, a zero-length []byte will be returned. 
+func Locate(key string, raw []byte) []byte { + s, n := locate(raw, key) + return raw[s:n] +} + +// Replace takes a key ("key") in a messagepack map ("raw") +// and replaces its value with the one provided and returns +// the new []byte. The returned []byte may point to the same +// memory as "raw". Replace makes no effort to evaluate the validity +// of the contents of 'val'. It may use up to the full capacity of 'raw.' +// Replace returns 'nil' if the field doesn't exist or if the object in 'raw' +// is not a map. +func Replace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, true) +} + +// CopyReplace works similarly to Replace except that the returned +// byte slice does not point to the same memory as 'raw'. CopyReplace +// returns 'nil' if the field doesn't exist or 'raw' isn't a map. +func CopyReplace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, false) +} + +// Remove removes a key-value pair from 'raw'. It returns +// 'raw' unchanged if the key didn't exist. +func Remove(key string, raw []byte) []byte { + start, end := locateKV(raw, key) + if start == end { + return raw + } + raw = raw[:start+copy(raw[start:], raw[end:])] + return resizeMap(raw, -1) +} + +// HasKey returns whether the map in 'raw' has +// a field with key 'key' +func HasKey(key string, raw []byte) bool { + sz, bts, err := ReadMapHeaderBytes(raw) + if err != nil { + return false + } + var field []byte + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return false + } + if UnsafeString(field) == key { + return true + } + } + return false +} + +func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte { + ll := end - start // length of segment to replace + lv := len(val) + + if inplace { + extra := lv - ll + + // fastest case: we're doing + // a 1:1 replacement + if extra == 0 { + copy(raw[start:], val) + return raw + + } else if extra < 0 { + // 'val' smaller than replaced value + // copy in place and shift back + + x := copy(raw[start:], val) + y := copy(raw[start+x:], raw[end:]) + return raw[:start+x+y] + + } else if extra < cap(raw)-len(raw) { + // 'val' less than (cap-len) extra bytes + // copy in place and shift forward + raw = raw[0 : len(raw)+extra] + // shift end forward + copy(raw[end+extra:], raw[end:]) + copy(raw[start:], val) + return raw + } + } + + // we have to allocate new space + out := make([]byte, len(raw)+len(val)-ll) + x := copy(out, raw[:start]) + y := copy(out[x:], val) + copy(out[x+y:], raw[end:]) + return out +} + +// locate does a naive O(n) search for the map key; returns start, end +// (returns 0,0 on error) +func locate(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return + } + + // loop and locate field + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + // start location + l := len(raw) + start = l - len(bts) + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = l - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// locate key AND value +func locateKV(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte 
+ field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return 0, 0 + } + + for i := uint32(0); i < sz; i++ { + tmp := len(bts) + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + start = len(raw) - tmp + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = len(raw) - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// delta is delta on map size +func resizeMap(raw []byte, delta int64) []byte { + var sz int64 + switch raw[0] { + case mmap16: + sz = int64(big.Uint16(raw[1:])) + if sz+delta <= math.MaxUint16 { + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[5:], raw[3:]) + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[3:]...) + + case mmap32: + sz = int64(big.Uint32(raw[1:])) + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + + default: + sz = int64(rfixmap(raw[0])) + if sz+delta < 16 { + raw[0] = wfixmap(uint8(sz + delta)) + return raw + } else if sz+delta <= math.MaxUint16 { + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[3:], raw[1:]) + raw[0] = mmap16 + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } + if cap(raw)-len(raw) >= 4 { + raw = raw[0 : len(raw)+4] + copy(raw[5:], raw[1:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go new file mode 100644 index 000000000000..95762e7eebe8 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/elsize.go @@ -0,0 +1,99 @@ +package msgp + +// size of every object on the wire, +// plus type information. gives us +// constant-time type information +// for traversing composite objects. 
+// +var sizes = [256]bytespec{ + mnil: {size: 1, extra: constsize, typ: NilType}, + mfalse: {size: 1, extra: constsize, typ: BoolType}, + mtrue: {size: 1, extra: constsize, typ: BoolType}, + mbin8: {size: 2, extra: extra8, typ: BinType}, + mbin16: {size: 3, extra: extra16, typ: BinType}, + mbin32: {size: 5, extra: extra32, typ: BinType}, + mext8: {size: 3, extra: extra8, typ: ExtensionType}, + mext16: {size: 4, extra: extra16, typ: ExtensionType}, + mext32: {size: 6, extra: extra32, typ: ExtensionType}, + mfloat32: {size: 5, extra: constsize, typ: Float32Type}, + mfloat64: {size: 9, extra: constsize, typ: Float64Type}, + muint8: {size: 2, extra: constsize, typ: UintType}, + muint16: {size: 3, extra: constsize, typ: UintType}, + muint32: {size: 5, extra: constsize, typ: UintType}, + muint64: {size: 9, extra: constsize, typ: UintType}, + mint8: {size: 2, extra: constsize, typ: IntType}, + mint16: {size: 3, extra: constsize, typ: IntType}, + mint32: {size: 5, extra: constsize, typ: IntType}, + mint64: {size: 9, extra: constsize, typ: IntType}, + mfixext1: {size: 3, extra: constsize, typ: ExtensionType}, + mfixext2: {size: 4, extra: constsize, typ: ExtensionType}, + mfixext4: {size: 6, extra: constsize, typ: ExtensionType}, + mfixext8: {size: 10, extra: constsize, typ: ExtensionType}, + mfixext16: {size: 18, extra: constsize, typ: ExtensionType}, + mstr8: {size: 2, extra: extra8, typ: StrType}, + mstr16: {size: 3, extra: extra16, typ: StrType}, + mstr32: {size: 5, extra: extra32, typ: StrType}, + marray16: {size: 3, extra: array16v, typ: ArrayType}, + marray32: {size: 5, extra: array32v, typ: ArrayType}, + mmap16: {size: 3, extra: map16v, typ: MapType}, + mmap32: {size: 5, extra: map32v, typ: MapType}, +} + +func init() { + // set up fixed fields + + // fixint + for i := mfixint; i < 0x80; i++ { + sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType} + } + + // nfixint + for i := uint16(mnfixint); i < 0x100; i++ { + sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType} + } + + // fixstr gets constsize, + // since the prefix yields the size + for i := mfixstr; i < 0xc0; i++ { + sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType} + } + + // fixmap + for i := mfixmap; i < 0x90; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType} + } + + // fixarray + for i := mfixarray; i < 0xa0; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType} + } +} + +// a valid bytespsec has +// non-zero 'size' and +// non-zero 'typ' +type bytespec struct { + size uint8 // prefix size information + extra varmode // extra size information + typ Type // type + _ byte // makes bytespec 4 bytes (yes, this matters) +} + +// size mode +// if positive, # elements for composites +type varmode int8 + +const ( + constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects) + extra8 = -1 // has uint8(p[1]) extra bytes + extra16 = -2 // has be16(p[1:]) extra bytes + extra32 = -3 // has be32(p[1:]) extra bytes + map16v = -4 // use map16 + map32v = -5 // use map32 + array16v = -6 // use array16 + array32v = -7 // use array32 +) + +func getType(v byte) Type { + return sizes[v].typ +} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go new file mode 100644 index 000000000000..5c24f2710388 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/errors.go @@ -0,0 +1,142 @@ +package msgp + +import ( + "fmt" + "reflect" +) + +var ( + // ErrShortBytes is returned when 
the + // slice being decoded is too short to + // contain the contents of the message + ErrShortBytes error = errShort{} + + // this error is only returned + // if we reach code that should + // be unreachable + fatal error = errFatal{} +) + +// Error is the interface satisfied +// by all of the errors that originate +// from this package. +type Error interface { + error + + // Resumable returns whether + // or not the error means that + // the stream of data is malformed + // and the information is unrecoverable. + Resumable() bool +} + +type errShort struct{} + +func (e errShort) Error() string { return "msgp: too few bytes left to read object" } +func (e errShort) Resumable() bool { return false } + +type errFatal struct{} + +func (f errFatal) Error() string { return "msgp: fatal decoding error (unreachable code)" } +func (f errFatal) Resumable() bool { return false } + +// ArrayError is an error returned +// when decoding a fix-sized array +// of the wrong size +type ArrayError struct { + Wanted uint32 + Got uint32 +} + +// Error implements the error interface +func (a ArrayError) Error() string { + return fmt.Sprintf("msgp: wanted array of size %d; got %d", a.Wanted, a.Got) +} + +// Resumable is always 'true' for ArrayErrors +func (a ArrayError) Resumable() bool { return true } + +// IntOverflow is returned when a call +// would downcast an integer to a type +// with too few bits to hold its value. +type IntOverflow struct { + Value int64 // the value of the integer + FailedBitsize int // the bit size that the int64 could not fit into +} + +// Error implements the error interface +func (i IntOverflow) Error() string { + return fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize) +} + +// Resumable is always 'true' for overflows +func (i IntOverflow) Resumable() bool { return true } + +// UintOverflow is returned when a call +// would downcast an unsigned integer to a type +// with too few bits to hold its value +type UintOverflow struct { + Value uint64 // value of the uint + FailedBitsize int // the bit size that couldn't fit the value +} + +// Error implements the error interface +func (u UintOverflow) Error() string { + return fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize) +} + +// Resumable is always 'true' for overflows +func (u UintOverflow) Resumable() bool { return true } + +// A TypeError is returned when a particular +// decoding method is unsuitable for decoding +// a particular MessagePack value. +type TypeError struct { + Method Type // Type expected by method + Encoded Type // Type actually encoded +} + +// Error implements the error interface +func (t TypeError) Error() string { + return fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method) +} + +// Resumable returns 'true' for TypeErrors +func (t TypeError) Resumable() bool { return true } + +// returns either InvalidPrefixError or +// TypeError depending on whether or not +// the prefix is recognized +func badPrefix(want Type, lead byte) error { + t := sizes[lead].typ + if t == InvalidType { + return InvalidPrefixError(lead) + } + return TypeError{Method: want, Encoded: t} +} + +// InvalidPrefixError is returned when a bad encoding +// uses a prefix that is not recognized in the MessagePack standard. +// This kind of error is unrecoverable. 
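+//
+// Callers can distinguish recoverable from fatal decode errors via
+// the Error interface above; a minimal sketch (the helper is
+// illustrative, not part of this package):
+//
+//	func canContinue(err error) bool {
+//		if e, ok := err.(Error); ok {
+//			return e.Resumable()
+//		}
+//		return false
+//	}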
+type InvalidPrefixError byte
+
+// Error implements the error interface
+func (i InvalidPrefixError) Error() string {
+	return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i))
+}
+
+// Resumable returns 'false' for InvalidPrefixErrors
+func (i InvalidPrefixError) Resumable() bool { return false }
+
+// ErrUnsupportedType is returned
+// when a bad argument is supplied
+// to a function that takes `interface{}`.
+type ErrUnsupportedType struct {
+	T reflect.Type
+}
+
+// Error implements error
+func (e *ErrUnsupportedType) Error() string { return fmt.Sprintf("msgp: type %q not supported", e.T) }
+
+// Resumable returns 'true' for ErrUnsupportedType
+func (e *ErrUnsupportedType) Resumable() bool { return true }
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go
new file mode 100644
index 000000000000..588b18f95b97
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/extension.go
@@ -0,0 +1,548 @@
+package msgp
+
+import (
+	"fmt"
+	"math"
+)
+
+const (
+	// Complex64Extension is the extension number used for complex64
+	Complex64Extension = 3
+
+	// Complex128Extension is the extension number used for complex128
+	Complex128Extension = 4
+
+	// TimeExtension is the extension number used for time.Time
+	TimeExtension = 5
+)
+
+// our extensions live here
+var extensionReg = make(map[int8]func() Extension)
+
+// RegisterExtension registers extensions so that they
+// can be initialized and returned by methods that
+// decode `interface{}` values. This should only
+// be called during initialization. f() should return
+// a newly-initialized zero value of the extension. Keep in
+// mind that extensions 3, 4, and 5 are reserved for
+// complex64, complex128, and time.Time, respectively,
+// and that MessagePack reserves extension types from -127 to -1.
+//
+// For example, if you wanted to register a user-defined struct:
+//
+//	msgp.RegisterExtension(10, func() msgp.Extension { return &MyExtension{} })
+//
+// RegisterExtension will panic if you call it multiple times
+// with the same 'typ' argument, or if you use a reserved
+// type (3, 4, or 5).
+func RegisterExtension(typ int8, f func() Extension) {
+	switch typ {
+	case Complex64Extension, Complex128Extension, TimeExtension:
+		panic(fmt.Sprint("msgp: forbidden extension type:", typ))
+	}
+	if _, ok := extensionReg[typ]; ok {
+		panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once"))
+	}
+	extensionReg[typ] = f
+}
+
+// ExtensionTypeError is an error type returned
+// when there is a mismatch between an extension type
+// and the type encoded on the wire
+type ExtensionTypeError struct {
+	Got  int8
+	Want int8
+}
+
+// Error implements the error interface
+func (e ExtensionTypeError) Error() string {
+	return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got)
+}
+
+// Resumable returns 'true' for ExtensionTypeErrors
+func (e ExtensionTypeError) Resumable() bool { return true }
+
+func errExt(got int8, wanted int8) error {
+	return ExtensionTypeError{Got: got, Want: wanted}
+}
+
+// Extension is the interface fulfilled
+// by types that want to define their
+// own binary encoding.
+type Extension interface {
+	// ExtensionType should return
+	// an int8 that identifies the concrete
+	// type of the extension. (Types <0 are
+	// officially reserved by the MessagePack
+	// specifications.)
+ ExtensionType() int8 + + // Len should return the length + // of the data to be encoded + Len() int + + // MarshalBinaryTo should copy + // the data into the supplied slice, + // assuming that the slice has length Len() + MarshalBinaryTo([]byte) error + + UnmarshalBinary([]byte) error +} + +// RawExtension implements the Extension interface +type RawExtension struct { + Data []byte + Type int8 +} + +// ExtensionType implements Extension.ExtensionType, and returns r.Type +func (r *RawExtension) ExtensionType() int8 { return r.Type } + +// Len implements Extension.Len, and returns len(r.Data) +func (r *RawExtension) Len() int { return len(r.Data) } + +// MarshalBinaryTo implements Extension.MarshalBinaryTo, +// and returns a copy of r.Data +func (r *RawExtension) MarshalBinaryTo(d []byte) error { + copy(d, r.Data) + return nil +} + +// UnmarshalBinary implements Extension.UnmarshalBinary, +// and sets r.Data to the contents of the provided slice +func (r *RawExtension) UnmarshalBinary(b []byte) error { + if cap(r.Data) >= len(b) { + r.Data = r.Data[0:len(b)] + } else { + r.Data = make([]byte, len(b)) + } + copy(r.Data, b) + return nil +} + +// WriteExtension writes an extension type to the writer +func (mw *Writer) WriteExtension(e Extension) error { + l := e.Len() + var err error + switch l { + case 0: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 0 + mw.buf[o+2] = byte(e.ExtensionType()) + case 1: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext1 + mw.buf[o+1] = byte(e.ExtensionType()) + case 2: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext2 + mw.buf[o+1] = byte(e.ExtensionType()) + case 4: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext4 + mw.buf[o+1] = byte(e.ExtensionType()) + case 8: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = byte(e.ExtensionType()) + case 16: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = byte(e.ExtensionType()) + default: + switch { + case l < math.MaxUint8: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = byte(uint8(l)) + mw.buf[o+2] = byte(e.ExtensionType()) + case l < math.MaxUint16: + o, err := mw.require(4) + if err != nil { + return err + } + mw.buf[o] = mext16 + big.PutUint16(mw.buf[o+1:], uint16(l)) + mw.buf[o+3] = byte(e.ExtensionType()) + default: + o, err := mw.require(6) + if err != nil { + return err + } + mw.buf[o] = mext32 + big.PutUint32(mw.buf[o+1:], uint32(l)) + mw.buf[o+5] = byte(e.ExtensionType()) + } + } + // we can only write directly to the + // buffer if we're sure that it + // fits the object + if l <= mw.bufsize() { + o, err := mw.require(l) + if err != nil { + return err + } + return e.MarshalBinaryTo(mw.buf[o:]) + } + // here we create a new buffer + // just large enough for the body + // and save it as the write buffer + err = mw.flush() + if err != nil { + return err + } + buf := make([]byte, l) + err = e.MarshalBinaryTo(buf) + if err != nil { + return err + } + mw.buf = buf + mw.wloc = l + return nil +} + +// peek at the extension type, assuming the next +// kind to be read is Extension +func (m *Reader) peekExtensionType() (int8, error) { + p, err := m.R.Peek(2) + if err != nil { + return 0, err + } + spec := sizes[p[0]] + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, p[0]) + } + if spec.extra == 
constsize { + return int8(p[1]), nil + } + size := spec.size + p, err = m.R.Peek(int(size)) + if err != nil { + return 0, err + } + return int8(p[size-1]), nil +} + +// peekExtension peeks at the extension encoding type +// (must guarantee at least 1 byte in 'b') +func peekExtension(b []byte) (int8, error) { + spec := sizes[b[0]] + size := spec.size + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, b[0]) + } + if len(b) < int(size) { + return 0, ErrShortBytes + } + // for fixed extensions, + // the type information is in + // the second byte + if spec.extra == constsize { + return int8(b[1]), nil + } + // otherwise, it's in the last + // part of the prefix + return int8(b[size-1]), nil +} + +// ReadExtension reads the next object from the reader +// as an extension. ReadExtension will fail if the next +// object in the stream is not an extension, or if +// e.Type() is not the same as the wire type. +func (m *Reader) ReadExtension(e Extension) (err error) { + var p []byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead := p[0] + var read int + var off int + switch lead { + case mfixext1: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(3) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(3) + } + return + + case mfixext2: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(4) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(4) + } + return + + case mfixext4: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(6) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(6) + } + return + + case mfixext8: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(10) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(10) + } + return + + case mfixext16: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(18) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(18) + } + return + + case mext8: + p, err = m.R.Peek(3) + if err != nil { + return + } + if int8(p[2]) != e.ExtensionType() { + err = errExt(int8(p[2]), e.ExtensionType()) + return + } + read = int(uint8(p[1])) + off = 3 + + case mext16: + p, err = m.R.Peek(4) + if err != nil { + return + } + if int8(p[3]) != e.ExtensionType() { + err = errExt(int8(p[3]), e.ExtensionType()) + return + } + read = int(big.Uint16(p[1:])) + off = 4 + + case mext32: + p, err = m.R.Peek(6) + if err != nil { + return + } + if int8(p[5]) != e.ExtensionType() { + err = errExt(int8(p[5]), e.ExtensionType()) + return + } + read = int(big.Uint32(p[1:])) + off = 6 + + default: + err = badPrefix(ExtensionType, lead) + return + } + + p, err = m.R.Peek(read + off) + if err != nil { + return + } + err = e.UnmarshalBinary(p[off:]) + if err == nil { + _, err = m.R.Skip(read + off) + } + return +} + +// AppendExtension appends a MessagePack extension to the provided slice +func AppendExtension(b []byte, e Extension) ([]byte, error) { + l := e.Len() + var o []byte + var n int + switch l { + case 0: + o, n = ensure(b, 3) + o[n] = mext8 + o[n+1] = 0 + o[n+2] = 
byte(e.ExtensionType())
+		return o[:n+3], nil
+	case 1:
+		o, n = ensure(b, 3)
+		o[n] = mfixext1
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	case 2:
+		o, n = ensure(b, 4)
+		o[n] = mfixext2
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	case 4:
+		o, n = ensure(b, 6)
+		o[n] = mfixext4
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	case 8:
+		o, n = ensure(b, 10)
+		o[n] = mfixext8
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	case 16:
+		o, n = ensure(b, 18)
+		o[n] = mfixext16
+		o[n+1] = byte(e.ExtensionType())
+		n += 2
+	default:
+		switch {
+		case l < math.MaxUint8:
+			o, n = ensure(b, l+3)
+			o[n] = mext8
+			o[n+1] = byte(uint8(l))
+			o[n+2] = byte(e.ExtensionType())
+			n += 3
+		case l < math.MaxUint16:
+			o, n = ensure(b, l+4)
+			o[n] = mext16
+			big.PutUint16(o[n+1:], uint16(l))
+			o[n+3] = byte(e.ExtensionType())
+			n += 4
+		default:
+			o, n = ensure(b, l+6)
+			o[n] = mext32
+			big.PutUint32(o[n+1:], uint32(l))
+			o[n+5] = byte(e.ExtensionType())
+			n += 6
+		}
+	}
+	return o, e.MarshalBinaryTo(o[n:])
+}
+
+// ReadExtensionBytes reads an extension from 'b' into 'e'
+// and returns any remaining bytes.
+// Possible errors:
+// - ErrShortBytes ('b' not long enough)
+// - ExtensionTypeError{} (wire type not the same as e.Type())
+// - TypeError{} (next object not an extension)
+// - InvalidPrefixError
+// - An unmarshal error returned from e.UnmarshalBinary
+func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) {
+	l := len(b)
+	if l < 3 {
+		return b, ErrShortBytes
+	}
+	lead := b[0]
+	var (
+		sz  int // size of 'data'
+		off int // offset of 'data'
+		typ int8
+	)
+	switch lead {
+	case mfixext1:
+		typ = int8(b[1])
+		sz = 1
+		off = 2
+	case mfixext2:
+		typ = int8(b[1])
+		sz = 2
+		off = 2
+	case mfixext4:
+		typ = int8(b[1])
+		sz = 4
+		off = 2
+	case mfixext8:
+		typ = int8(b[1])
+		sz = 8
+		off = 2
+	case mfixext16:
+		typ = int8(b[1])
+		sz = 16
+		off = 2
+	case mext8:
+		sz = int(uint8(b[1]))
+		typ = int8(b[2])
+		off = 3
+		if sz == 0 {
+			return b[3:], e.UnmarshalBinary(b[3:3])
+		}
+	case mext16:
+		if l < 4 {
+			return b, ErrShortBytes
+		}
+		sz = int(big.Uint16(b[1:]))
+		typ = int8(b[3])
+		off = 4
+	case mext32:
+		if l < 6 {
+			return b, ErrShortBytes
+		}
+		sz = int(big.Uint32(b[1:]))
+		typ = int8(b[5])
+		off = 6
+	default:
+		return b, badPrefix(ExtensionType, lead)
+	}
+
+	if typ != e.ExtensionType() {
+		return b, errExt(typ, e.ExtensionType())
+	}
+
+	// the data of the extension starts
+	// at 'off' and is 'sz' bytes long
+	if len(b[off:]) < sz {
+		return b, ErrShortBytes
+	}
+	tot := off + sz
+	return b[tot:], e.UnmarshalBinary(b[off:tot])
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go
new file mode 100644
index 000000000000..8e7370ebc298
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file.go
@@ -0,0 +1,92 @@
+// +build linux darwin dragonfly freebsd netbsd openbsd
+// +build !appengine
+
+package msgp
+
+import (
+	"os"
+	"syscall"
+)
+
+// ReadFile reads a file into 'dst' using
+// a read-only memory mapping. Consequently,
+// the file must be mmap-able, and the
+// Unmarshaler should never write to
+// the source memory. (Methods generated
+// by the msgp tool obey that constraint, but
+// user-defined implementations may not.)
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
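+//
+// A minimal usage sketch (the path and the Unmarshaler value 'msg'
+// are illustrative, not part of this package):
+//
+//	f, err := os.Open("/tmp/object.msgp")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	err = msgp.ReadFile(msg, f)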
+// +func ReadFile(dst Unmarshaler, file *os.File) error { + stat, err := file.Stat() + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseRead(data) + _, err = dst.UnmarshalMsg(data) + uerr := syscall.Munmap(data) + if err == nil { + err = uerr + } + return err +} + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +// WriteFile writes a file from 'src' using +// memory mapping. It overwrites the entire +// contents of the previous file. +// The mapping size is calculated +// using the `Msgsize()` method +// of 'src', so it must produce a result +// equal to or greater than the actual encoded +// size of the object. Otherwise, +// a fault (SIGBUS) will occur. +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. +// +// NOTE: The performance of this call +// is highly OS- and filesystem-dependent. +// Users should take care to test that this +// performs as expected in a production environment. +// (Linux users should run a kernel and filesystem +// that support fallocate(2) for the best results.) +func WriteFile(src MarshalSizer, file *os.File) error { + sz := src.Msgsize() + err := fallocate(file, int64(sz)) + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseWrite(data) + chunk := data[:0] + chunk, err = src.MarshalMsg(chunk) + if err != nil { + return err + } + uerr := syscall.Munmap(data) + if uerr != nil { + return uerr + } + return file.Truncate(int64(len(chunk))) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go new file mode 100644 index 000000000000..6e654dbdc258 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go @@ -0,0 +1,47 @@ +// +build windows appengine + +package msgp + +import ( + "io/ioutil" + "os" +) + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +func ReadFile(dst Unmarshaler, file *os.File) error { + if u, ok := dst.(Decodable); ok { + return u.DecodeMsg(NewReader(file)) + } + + data, err := ioutil.ReadAll(file) + if err != nil { + return err + } + _, err = dst.UnmarshalMsg(data) + return err +} + +func WriteFile(src MarshalSizer, file *os.File) error { + if e, ok := src.(Encodable); ok { + w := NewWriter(file) + err := e.EncodeMsg(w) + if err == nil { + err = w.Flush() + } + return err + } + + raw, err := src.MarshalMsg(nil) + if err != nil { + return err + } + _, err = file.Write(raw) + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go new file mode 100644 index 000000000000..f817d77598a5 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/integers.go @@ -0,0 +1,174 @@ +package msgp + +/* ---------------------------------- + integer encoding utilities + (inline-able) + + TODO(tinylib): there are faster, + albeit non-portable solutions + to the code below. implement + byteswap? 
+ ---------------------------------- */ + +func putMint64(b []byte, i int64) { + b[0] = mint64 + b[1] = byte(i >> 56) + b[2] = byte(i >> 48) + b[3] = byte(i >> 40) + b[4] = byte(i >> 32) + b[5] = byte(i >> 24) + b[6] = byte(i >> 16) + b[7] = byte(i >> 8) + b[8] = byte(i) +} + +func getMint64(b []byte) int64 { + return (int64(b[1]) << 56) | (int64(b[2]) << 48) | + (int64(b[3]) << 40) | (int64(b[4]) << 32) | + (int64(b[5]) << 24) | (int64(b[6]) << 16) | + (int64(b[7]) << 8) | (int64(b[8])) +} + +func putMint32(b []byte, i int32) { + b[0] = mint32 + b[1] = byte(i >> 24) + b[2] = byte(i >> 16) + b[3] = byte(i >> 8) + b[4] = byte(i) +} + +func getMint32(b []byte) int32 { + return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) +} + +func putMint16(b []byte, i int16) { + b[0] = mint16 + b[1] = byte(i >> 8) + b[2] = byte(i) +} + +func getMint16(b []byte) (i int16) { + return (int16(b[1]) << 8) | int16(b[2]) +} + +func putMint8(b []byte, i int8) { + b[0] = mint8 + b[1] = byte(i) +} + +func getMint8(b []byte) (i int8) { + return int8(b[1]) +} + +func putMuint64(b []byte, u uint64) { + b[0] = muint64 + b[1] = byte(u >> 56) + b[2] = byte(u >> 48) + b[3] = byte(u >> 40) + b[4] = byte(u >> 32) + b[5] = byte(u >> 24) + b[6] = byte(u >> 16) + b[7] = byte(u >> 8) + b[8] = byte(u) +} + +func getMuint64(b []byte) uint64 { + return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | + (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | + (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | + (uint64(b[7]) << 8) | (uint64(b[8])) +} + +func putMuint32(b []byte, u uint32) { + b[0] = muint32 + b[1] = byte(u >> 24) + b[2] = byte(u >> 16) + b[3] = byte(u >> 8) + b[4] = byte(u) +} + +func getMuint32(b []byte) uint32 { + return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) +} + +func putMuint16(b []byte, u uint16) { + b[0] = muint16 + b[1] = byte(u >> 8) + b[2] = byte(u) +} + +func getMuint16(b []byte) uint16 { + return (uint16(b[1]) << 8) | uint16(b[2]) +} + +func putMuint8(b []byte, u uint8) { + b[0] = muint8 + b[1] = byte(u) +} + +func getMuint8(b []byte) uint8 { + return uint8(b[1]) +} + +func getUnix(b []byte) (sec int64, nsec int32) { + sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) | + (int64(b[2]) << 40) | (int64(b[3]) << 32) | + (int64(b[4]) << 24) | (int64(b[5]) << 16) | + (int64(b[6]) << 8) | (int64(b[7])) + + nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11])) + return +} + +func putUnix(b []byte, sec int64, nsec int32) { + b[0] = byte(sec >> 56) + b[1] = byte(sec >> 48) + b[2] = byte(sec >> 40) + b[3] = byte(sec >> 32) + b[4] = byte(sec >> 24) + b[5] = byte(sec >> 16) + b[6] = byte(sec >> 8) + b[7] = byte(sec) + b[8] = byte(nsec >> 24) + b[9] = byte(nsec >> 16) + b[10] = byte(nsec >> 8) + b[11] = byte(nsec) +} + +/* ----------------------------- + prefix utilities + ----------------------------- */ + +// write prefix and uint8 +func prefixu8(b []byte, pre byte, sz uint8) { + b[0] = pre + b[1] = byte(sz) +} + +// write prefix and big-endian uint16 +func prefixu16(b []byte, pre byte, sz uint16) { + b[0] = pre + b[1] = byte(sz >> 8) + b[2] = byte(sz) +} + +// write prefix and big-endian uint32 +func prefixu32(b []byte, pre byte, sz uint32) { + b[0] = pre + b[1] = byte(sz >> 24) + b[2] = byte(sz >> 16) + b[3] = byte(sz >> 8) + b[4] = byte(sz) +} + +func prefixu64(b []byte, pre byte, sz uint64) { + b[0] = pre + b[1] = byte(sz >> 56) + b[2] = byte(sz >> 48) + b[3] = byte(sz >> 40) + b[4] = byte(sz >> 32) + b[5] = byte(sz >> 
24) + b[6] = byte(sz >> 16) + b[7] = byte(sz >> 8) + b[8] = byte(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go new file mode 100644 index 000000000000..4325860ada06 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json.go @@ -0,0 +1,542 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "unicode/utf8" +) + +var ( + null = []byte("null") + hex = []byte("0123456789abcdef") +) + +var defuns [_maxtype]func(jsWriter, *Reader) (int, error) + +// note: there is an initialization loop if +// this isn't set up during init() +func init() { + // since none of these functions are inline-able, + // there is not much of a penalty to the indirect + // call. however, this is best expressed as a jump-table... + defuns = [_maxtype]func(jsWriter, *Reader) (int, error){ + StrType: rwString, + BinType: rwBytes, + MapType: rwMap, + ArrayType: rwArray, + Float64Type: rwFloat64, + Float32Type: rwFloat32, + BoolType: rwBool, + IntType: rwInt, + UintType: rwUint, + NilType: rwNil, + ExtensionType: rwExtension, + Complex64Type: rwExtension, + Complex128Type: rwExtension, + TimeType: rwTime, + } +} + +// this is the interface +// used to write json +type jsWriter interface { + io.Writer + io.ByteWriter + WriteString(string) (int, error) +} + +// CopyToJSON reads MessagePack from 'src' and copies it +// as JSON to 'dst' until EOF. +func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) { + r := NewReader(src) + n, err = r.WriteToJSON(dst) + freeR(r) + return +} + +// WriteToJSON translates MessagePack from 'r' and writes it as +// JSON to 'w' until the underlying reader returns io.EOF. It returns +// the number of bytes written, and an error if it stopped before EOF. 
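+//
+// A minimal usage sketch (the data source and destination are
+// illustrative):
+//
+//	r := msgp.NewReader(bytes.NewReader(raw))
+//	if _, err := r.WriteToJSON(os.Stdout); err != nil {
+//		// stream was malformed or the writer failed
+//	}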
+func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
+	var j jsWriter
+	var bf *bufio.Writer
+	if jsw, ok := w.(jsWriter); ok {
+		j = jsw
+	} else {
+		bf = bufio.NewWriter(w)
+		j = bf
+	}
+	var nn int
+	for err == nil {
+		nn, err = rwNext(j, r)
+		n += int64(nn)
+	}
+	if err != io.EOF {
+		if bf != nil {
+			bf.Flush()
+		}
+		return
+	}
+	err = nil
+	if bf != nil {
+		err = bf.Flush()
+	}
+	return
+}
+
+func rwNext(w jsWriter, src *Reader) (int, error) {
+	t, err := src.NextType()
+	if err != nil {
+		return 0, err
+	}
+	return defuns[t](w, src)
+}
+
+func rwMap(dst jsWriter, src *Reader) (n int, err error) {
+	var comma bool
+	var sz uint32
+	var field []byte
+
+	sz, err = src.ReadMapHeader()
+	if err != nil {
+		return
+	}
+
+	if sz == 0 {
+		return dst.WriteString("{}")
+	}
+
+	err = dst.WriteByte('{')
+	if err != nil {
+		return
+	}
+	n++
+	var nn int
+	for i := uint32(0); i < sz; i++ {
+		if comma {
+			err = dst.WriteByte(',')
+			if err != nil {
+				return
+			}
+			n++
+		}
+
+		field, err = src.ReadMapKeyPtr()
+		if err != nil {
+			return
+		}
+		nn, err = rwquoted(dst, field)
+		n += nn
+		if err != nil {
+			return
+		}
+
+		err = dst.WriteByte(':')
+		if err != nil {
+			return
+		}
+		n++
+		nn, err = rwNext(dst, src)
+		n += nn
+		if err != nil {
+			return
+		}
+		if !comma {
+			comma = true
+		}
+	}
+
+	err = dst.WriteByte('}')
+	if err != nil {
+		return
+	}
+	n++
+	return
+}
+
+func rwArray(dst jsWriter, src *Reader) (n int, err error) {
+	err = dst.WriteByte('[')
+	if err != nil {
+		return
+	}
+	n++
+	var sz uint32
+	var nn int
+	sz, err = src.ReadArrayHeader()
+	if err != nil {
+		return
+	}
+	comma := false
+	for i := uint32(0); i < sz; i++ {
+		if comma {
+			err = dst.WriteByte(',')
+			if err != nil {
+				return
+			}
+			n++
+		}
+		nn, err = rwNext(dst, src)
+		n += nn
+		if err != nil {
+			return
+		}
+		comma = true
+	}
+
+	err = dst.WriteByte(']')
+	if err != nil {
+		return
+	}
+	n++
+	return
+}
+
+func rwNil(dst jsWriter, src *Reader) (int, error) {
+	err := src.ReadNil()
+	if err != nil {
+		return 0, err
+	}
+	return dst.Write(null)
+}
+
+func rwFloat32(dst jsWriter, src *Reader) (int, error) {
+	f, err := src.ReadFloat32()
+	if err != nil {
+		return 0, err
+	}
+	src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 32)
+	return dst.Write(src.scratch)
+}
+
+func rwFloat64(dst jsWriter, src *Reader) (int, error) {
+	f, err := src.ReadFloat64()
+	if err != nil {
+		return 0, err
+	}
+	src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 64)
+	return dst.Write(src.scratch)
+}
+
+func rwInt(dst jsWriter, src *Reader) (int, error) {
+	i, err := src.ReadInt64()
+	if err != nil {
+		return 0, err
+	}
+	src.scratch = strconv.AppendInt(src.scratch[:0], i, 10)
+	return dst.Write(src.scratch)
+}
+
+func rwUint(dst jsWriter, src *Reader) (int, error) {
+	u, err := src.ReadUint64()
+	if err != nil {
+		return 0, err
+	}
+	src.scratch = strconv.AppendUint(src.scratch[:0], u, 10)
+	return dst.Write(src.scratch)
+}
+
+func rwBool(dst jsWriter, src *Reader) (int, error) {
+	b, err := src.ReadBool()
+	if err != nil {
+		return 0, err
+	}
+	if b {
+		return dst.WriteString("true")
+	}
+	return dst.WriteString("false")
+}
+
+func rwTime(dst jsWriter, src *Reader) (int, error) {
+	t, err := src.ReadTime()
+	if err != nil {
+		return 0, err
+	}
+	bts, err := t.MarshalJSON()
+	if err != nil {
+		return 0, err
+	}
+	return dst.Write(bts)
+}
+
+func rwExtension(dst jsWriter, src *Reader) (n int, err error) {
+	et, err := src.peekExtensionType()
+	if err != nil {
+		return 0, err
+	}
+
+	// registered extensions can override
+	// the JSON encoding
+	if j, ok := extensionReg[et]; ok {
+		var bts []byte
+		e := j()
+		err = src.ReadExtension(e)
+		if err != nil {
+			return
+		}
+		bts, err = json.Marshal(e)
+		if err != nil {
+			return
+		}
+		return dst.Write(bts)
+	}
+
+	e := RawExtension{}
+	e.Type = et
+	err = src.ReadExtension(&e)
+	if err != nil {
+		return
+	}
+
+	var nn int
+	err = dst.WriteByte('{')
+	if err != nil {
+		return
+	}
+	n++
+
+	nn, err = dst.WriteString(`"type":`)
+	n += nn
+	if err != nil {
+		return
+	}
+
+	src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10)
+	nn, err = dst.Write(src.scratch)
+	n += nn
+	if err != nil {
+		return
+	}
+
+	nn, err = dst.WriteString(`,"data":"`)
+	n += nn
+	if err != nil {
+		return
+	}
+
+	enc := base64.NewEncoder(base64.StdEncoding, dst)
+
+	nn, err = enc.Write(e.Data)
+	n += nn
+	if err != nil {
+		return
+	}
+	err = enc.Close()
+	if err != nil {
+		return
+	}
+	nn, err = dst.WriteString(`"}`)
+	n += nn
+	return
+}
+
+func rwString(dst jsWriter, src *Reader) (n int, err error) {
+	var p []byte
+	p, err = src.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead := p[0]
+	var read int
+
+	if isfixstr(lead) {
+		read = int(rfixstr(lead))
+		src.R.Skip(1)
+		goto write
+	}
+
+	switch lead {
+	case mstr8:
+		p, err = src.R.Next(2)
+		if err != nil {
+			return
+		}
+		read = int(uint8(p[1]))
+	case mstr16:
+		p, err = src.R.Next(3)
+		if err != nil {
+			return
+		}
+		read = int(big.Uint16(p[1:]))
+	case mstr32:
+		p, err = src.R.Next(5)
+		if err != nil {
+			return
+		}
+		read = int(big.Uint32(p[1:]))
+	default:
+		err = badPrefix(StrType, lead)
+		return
+	}
+write:
+	p, err = src.R.Next(read)
+	if err != nil {
+		return
+	}
+	n, err = rwquoted(dst, p)
+	return
+}
+
+func rwBytes(dst jsWriter, src *Reader) (n int, err error) {
+	var nn int
+	err = dst.WriteByte('"')
+	if err != nil {
+		return
+	}
+	n++
+	src.scratch, err = src.ReadBytes(src.scratch[:0])
+	if err != nil {
+		return
+	}
+	enc := base64.NewEncoder(base64.StdEncoding, dst)
+	nn, err = enc.Write(src.scratch)
+	n += nn
+	if err != nil {
+		return
+	}
+	err = enc.Close()
+	if err != nil {
+		return
+	}
+	err = dst.WriteByte('"')
+	if err != nil {
+		return
+	}
+	n++
+	return
+}
+
+// Below (c) The Go Authors, 2009-2014
+// Subject to the BSD-style license found at http://golang.org
+//
+// see: encoding/json/encode.go:(*encodeState).stringbytes()
+func rwquoted(dst jsWriter, s []byte) (n int, err error) {
+	var nn int
+	err = dst.WriteByte('"')
+	if err != nil {
+		return
+	}
+	n++
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				nn, err = dst.Write(s[start:i])
+				n += nn
+				if err != nil {
+					return
+				}
+			}
+			switch b {
+			case '\\', '"':
+				err = dst.WriteByte('\\')
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte(b)
+				if err != nil {
+					return
+				}
+				n++
+			case '\n':
+				err = dst.WriteByte('\\')
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte('n')
+				if err != nil {
+					return
+				}
+				n++
+			case '\r':
+				err = dst.WriteByte('\\')
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte('r')
+				if err != nil {
+					return
+				}
+				n++
+			default:
+				nn, err = dst.WriteString(`\u00`)
+				n += nn
+				if err != nil {
+					return
+				}
+				err = dst.WriteByte(hex[b>>4])
+				if err != nil {
+					return
+				}
+				n++
+				err = dst.WriteByte(hex[b&0xF])
+				if err != nil {
+					return
+				}
+				n++
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				nn, err = dst.Write(s[start:i])
+				n += nn
+				if err != nil {
+					return
+				}
+			}
+			nn, err = dst.WriteString(`\ufffd`)
+			n += nn
+			if err != nil {
+				return
+			}
+			i += size
+			start = i
+			continue
+		}
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				nn, err = dst.Write(s[start:i])
+				n += nn
+				if err != nil {
+					return
+				}
+			}
+			nn, err = dst.WriteString(`\u202`)
+			n += nn
+			if err != nil {
+				return
+			}
+			err = dst.WriteByte(hex[c&0xF])
+			if err != nil {
+				return
+			}
+			n++
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		nn, err = dst.Write(s[start:])
+		n += nn
+		if err != nil {
+			return
+		}
+	}
+	err = dst.WriteByte('"')
+	if err != nil {
+		return
+	}
+	n++
+	return
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
new file mode 100644
index 000000000000..438caf5392c5
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
@@ -0,0 +1,363 @@
+package msgp
+
+import (
+	"bufio"
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"strconv"
+	"time"
+)
+
+var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error)
+
+func init() {
+
+	// NOTE(pmh): this is best expressed as a jump table,
+	// but gc doesn't do that yet. revisit post-go1.5.
+	unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){
+		StrType:        rwStringBytes,
+		BinType:        rwBytesBytes,
+		MapType:        rwMapBytes,
+		ArrayType:      rwArrayBytes,
+		Float64Type:    rwFloat64Bytes,
+		Float32Type:    rwFloat32Bytes,
+		BoolType:       rwBoolBytes,
+		IntType:        rwIntBytes,
+		UintType:       rwUintBytes,
+		NilType:        rwNullBytes,
+		ExtensionType:  rwExtensionBytes,
+		Complex64Type:  rwExtensionBytes,
+		Complex128Type: rwExtensionBytes,
+		TimeType:       rwTimeBytes,
+	}
+}
+
+// UnmarshalAsJSON takes raw messagepack and writes
+// it as JSON to 'w'. If an error is returned, the
+// bytes not translated will also be returned. If
+// no errors are encountered, the length of the returned
+// slice will be zero.
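+//
+// A minimal usage sketch ('raw' is assumed to hold one or more
+// encoded objects):
+//
+//	var buf bytes.Buffer
+//	left, err := msgp.UnmarshalAsJSON(&buf, raw)
+//	if err != nil {
+//		// 'left' holds the bytes that could not be translated
+//	}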
+func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { + var ( + scratch []byte + cast bool + dst jsWriter + err error + ) + if jsw, ok := w.(jsWriter); ok { + dst = jsw + cast = true + } else { + dst = bufio.NewWriterSize(w, 512) + } + for len(msg) > 0 && err == nil { + msg, scratch, err = writeNext(dst, msg, scratch) + } + if !cast && err == nil { + err = dst.(*bufio.Writer).Flush() + } + return msg, err +} + +func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + if len(msg) < 1 { + return msg, scratch, ErrShortBytes + } + t := getType(msg[0]) + if t == InvalidType { + return msg, scratch, InvalidPrefixError(msg[0]) + } + if t == ExtensionType { + et, err := peekExtension(msg) + if err != nil { + return nil, scratch, err + } + if et == TimeExtension { + t = TimeType + } + } + return unfuns[t](w, msg, scratch) +} + +func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadArrayHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('[') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte(']') + return msg, scratch, err +} + +func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadMapHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('{') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = rwMapKeyBytes(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte(':') + if err != nil { + return msg, scratch, err + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte('}') + return msg, scratch, err +} + +func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, scratch, err := rwStringBytes(w, msg, scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return rwBytesBytes(w, msg, scratch) + } + } + return msg, scratch, err +} + +func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + str, msg, err := ReadStringZC(msg) + if err != nil { + return msg, scratch, err + } + _, err = rwquoted(w, str) + return msg, scratch, err +} + +func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + bts, msg, err := ReadBytesZC(msg) + if err != nil { + return msg, scratch, err + } + l := base64.StdEncoding.EncodedLen(len(bts)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, bts) + err = w.WriteByte('"') + if err != nil { + return msg, scratch, err + } + _, err = w.Write(scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('"') + return msg, scratch, err +} + +func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, err := ReadNilBytes(msg) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(null) + return msg, scratch, err +} + +func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + b, msg, err := 
ReadBoolBytes(msg) + if err != nil { + return msg, scratch, err + } + if b { + _, err = w.WriteString("true") + return msg, scratch, err + } + _, err = w.WriteString("false") + return msg, scratch, err +} + +func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + i, msg, err := ReadInt64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], i, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + u, msg, err := ReadUint64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendUint(scratch[0:0], u, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + var sz int + if f64 { + sz = 64 + f, msg, err = ReadFloat64Bytes(msg) + } else { + sz = 32 + var v float32 + v, msg, err = ReadFloat32Bytes(msg) + f = float64(v) + } + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float32 + var err error + f, msg, err = ReadFloat32Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + f, msg, err = ReadFloat64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var t time.Time + var err error + t, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := t.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err +} + +func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var err error + var et int8 + et, err = peekExtension(msg) + if err != nil { + return msg, scratch, err + } + + // if it's time.Time + if et == TimeExtension { + var tm time.Time + tm, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := tm.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // if the extension is registered, + // use its canonical JSON form + if f, ok := extensionReg[et]; ok { + e := f() + msg, err = ReadExtensionBytes(msg, e) + if err != nil { + return msg, scratch, err + } + bts, err := json.Marshal(e) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // otherwise, write `{"type": , "data": ""}` + r := RawExtension{} + r.Type = et + msg, err = ReadExtensionBytes(msg, &r) + if err != nil { + return msg, scratch, err + } + scratch, err = writeExt(w, r, scratch) + return msg, scratch, err +} + +func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) { + _, err := w.WriteString(`{"type":`) + if err != nil { + return scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10) + _, err = 
w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`,"data":"`) + if err != nil { + return scratch, err + } + l := base64.StdEncoding.EncodedLen(len(r.Data)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, r.Data) + _, err = w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`"}`) + return scratch, err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go new file mode 100644 index 000000000000..ad07ef99582b --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/number.go @@ -0,0 +1,267 @@ +package msgp + +import ( + "math" + "strconv" +) + +// The portable parts of the Number implementation + +// Number can be +// an int64, uint64, float32, +// or float64 internally. +// It can decode itself +// from any of the native +// messagepack number types. +// The zero-value of Number +// is Int(0). Using the equality +// operator with Number compares +// both the type and the value +// of the number. +type Number struct { + // internally, this + // is just a tagged union. + // the raw bits of the number + // are stored the same way regardless. + bits uint64 + typ Type +} + +// AsInt sets the number to an int64. +func (n *Number) AsInt(i int64) { + + // we always store int(0) + // as {0, InvalidType} in + // order to preserve + // the behavior of the == operator + if i == 0 { + n.typ = InvalidType + n.bits = 0 + return + } + + n.typ = IntType + n.bits = uint64(i) +} + +// AsUint sets the number to a uint64. +func (n *Number) AsUint(u uint64) { + n.typ = UintType + n.bits = u +} + +// AsFloat32 sets the value of the number +// to a float32. +func (n *Number) AsFloat32(f float32) { + n.typ = Float32Type + n.bits = uint64(math.Float32bits(f)) +} + +// AsFloat64 sets the value of the +// number to a float64. +func (n *Number) AsFloat64(f float64) { + n.typ = Float64Type + n.bits = math.Float64bits(f) +} + +// Int casts the number as an int64, and +// returns whether or not that was the +// underlying type. +func (n *Number) Int() (int64, bool) { + return int64(n.bits), n.typ == IntType || n.typ == InvalidType +} + +// Uint casts the number as a uint64, and returns +// whether or not that was the underlying type. +func (n *Number) Uint() (uint64, bool) { + return n.bits, n.typ == UintType +} + +// Float casts the number to a float64, and +// returns whether or not that was the underlying +// type (either a float64 or a float32). +func (n *Number) Float() (float64, bool) { + switch n.typ { + case Float32Type: + return float64(math.Float32frombits(uint32(n.bits))), true + case Float64Type: + return math.Float64frombits(n.bits), true + default: + return 0.0, false + } +} + +// Type will return one of: +// Float64Type, Float32Type, UintType, or IntType. 
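+//
+// For example (a sketch; note that the zero value reports IntType):
+//
+//	var n msgp.Number
+//	n.AsFloat64(2.5)
+//	_ = n.Type() // Float64Type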
+func (n *Number) Type() Type { + if n.typ == InvalidType { + return IntType + } + return n.typ +} + +// DecodeMsg implements msgp.Decodable +func (n *Number) DecodeMsg(r *Reader) error { + typ, err := r.NextType() + if err != nil { + return err + } + switch typ { + case Float32Type: + f, err := r.ReadFloat32() + if err != nil { + return err + } + n.AsFloat32(f) + return nil + case Float64Type: + f, err := r.ReadFloat64() + if err != nil { + return err + } + n.AsFloat64(f) + return nil + case IntType: + i, err := r.ReadInt64() + if err != nil { + return err + } + n.AsInt(i) + return nil + case UintType: + u, err := r.ReadUint64() + if err != nil { + return err + } + n.AsUint(u) + return nil + default: + return TypeError{Encoded: typ, Method: IntType} + } +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) { + typ := NextType(b) + switch typ { + case IntType: + i, o, err := ReadInt64Bytes(b) + if err != nil { + return b, err + } + n.AsInt(i) + return o, nil + case UintType: + u, o, err := ReadUint64Bytes(b) + if err != nil { + return b, err + } + n.AsUint(u) + return o, nil + case Float64Type: + f, o, err := ReadFloat64Bytes(b) + if err != nil { + return b, err + } + n.AsFloat64(f) + return o, nil + case Float32Type: + f, o, err := ReadFloat32Bytes(b) + if err != nil { + return b, err + } + n.AsFloat32(f) + return o, nil + default: + return b, TypeError{Method: IntType, Encoded: typ} + } +} + +// MarshalMsg implements msgp.Marshaler +func (n *Number) MarshalMsg(b []byte) ([]byte, error) { + switch n.typ { + case IntType: + return AppendInt64(b, int64(n.bits)), nil + case UintType: + return AppendUint64(b, uint64(n.bits)), nil + case Float64Type: + return AppendFloat64(b, math.Float64frombits(n.bits)), nil + case Float32Type: + return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil + default: + return AppendInt64(b, 0), nil + } +} + +// EncodeMsg implements msgp.Encodable +func (n *Number) EncodeMsg(w *Writer) error { + switch n.typ { + case IntType: + return w.WriteInt64(int64(n.bits)) + case UintType: + return w.WriteUint64(n.bits) + case Float64Type: + return w.WriteFloat64(math.Float64frombits(n.bits)) + case Float32Type: + return w.WriteFloat32(math.Float32frombits(uint32(n.bits))) + default: + return w.WriteInt64(0) + } +} + +// Msgsize implements msgp.Sizer +func (n *Number) Msgsize() int { + switch n.typ { + case Float32Type: + return Float32Size + case Float64Type: + return Float64Size + case IntType: + return Int64Size + case UintType: + return Uint64Size + default: + return 1 // fixint(0) + } +} + +// MarshalJSON implements json.Marshaler +func (n *Number) MarshalJSON() ([]byte, error) { + t := n.Type() + if t == InvalidType { + return []byte{'0'}, nil + } + out := make([]byte, 0, 32) + switch t { + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.AppendFloat(out, f, 'f', -1, 64), nil + case IntType: + i, _ := n.Int() + return strconv.AppendInt(out, i, 10), nil + case UintType: + u, _ := n.Uint() + return strconv.AppendUint(out, u, 10), nil + default: + panic("(*Number).typ is invalid") + } +} + +// String implements fmt.Stringer +func (n *Number) String() string { + switch n.typ { + case InvalidType: + return "0" + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.FormatFloat(f, 'f', -1, 64) + case IntType: + i, _ := n.Int() + return strconv.FormatInt(i, 10) + case UintType: + u, _ := n.Uint() + return strconv.FormatUint(u, 10) + default: + panic("(*Number).typ is invalid") + } 
+} diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go new file mode 100644 index 000000000000..20cd1ef893ba --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read.go @@ -0,0 +1,1265 @@ +package msgp + +import ( + "io" + "math" + "sync" + "time" + + "github.com/philhofer/fwd" +) + +// where we keep old *Readers +var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }} + +// Type is a MessagePack wire type, +// including this package's built-in +// extension types. +type Type byte + +// MessagePack Types +// +// The zero value of Type +// is InvalidType. +const ( + InvalidType Type = iota + + // MessagePack built-in types + + StrType + BinType + MapType + ArrayType + Float64Type + Float32Type + BoolType + IntType + UintType + NilType + ExtensionType + + // pseudo-types provided + // by extensions + + Complex64Type + Complex128Type + TimeType + + _maxtype +) + +// String implements fmt.Stringer +func (t Type) String() string { + switch t { + case StrType: + return "str" + case BinType: + return "bin" + case MapType: + return "map" + case ArrayType: + return "array" + case Float64Type: + return "float64" + case Float32Type: + return "float32" + case BoolType: + return "bool" + case UintType: + return "uint" + case IntType: + return "int" + case ExtensionType: + return "ext" + case NilType: + return "nil" + default: + return "" + } +} + +func freeR(m *Reader) { + readerPool.Put(m) +} + +// Unmarshaler is the interface fulfilled +// by objects that know how to unmarshal +// themselves from MessagePack. +// UnmarshalMsg unmarshals the object +// from binary, returing any leftover +// bytes and any errors encountered. +type Unmarshaler interface { + UnmarshalMsg([]byte) ([]byte, error) +} + +// Decodable is the interface fulfilled +// by objects that know how to read +// themselves from a *Reader. +type Decodable interface { + DecodeMsg(*Reader) error +} + +// Decode decodes 'd' from 'r'. +func Decode(r io.Reader, d Decodable) error { + rd := NewReader(r) + err := d.DecodeMsg(rd) + freeR(rd) + return err +} + +// NewReader returns a *Reader that +// reads from the provided reader. The +// reader will be buffered. +func NewReader(r io.Reader) *Reader { + p := readerPool.Get().(*Reader) + if p.R == nil { + p.R = fwd.NewReader(r) + } else { + p.R.Reset(r) + } + return p +} + +// NewReaderSize returns a *Reader with a buffer of the given size. +// (This is vastly preferable to passing the decoder a reader that is already buffered.) +func NewReaderSize(r io.Reader, sz int) *Reader { + return &Reader{R: fwd.NewReaderSize(r, sz)} +} + +// Reader wraps an io.Reader and provides +// methods to read MessagePack-encoded values +// from it. Readers are buffered. +type Reader struct { + // R is the buffered reader + // that the Reader uses + // to decode MessagePack. + // The Reader itself + // is stateless; all the + // buffering is done + // within R. + R *fwd.Reader + scratch []byte +} + +// Read implements `io.Reader` +func (m *Reader) Read(p []byte) (int, error) { + return m.R.Read(p) +} + +// CopyNext reads the next object from m without decoding it and writes it to w. +// It avoids unnecessary copies internally. +func (m *Reader) CopyNext(w io.Writer) (int64, error) { + sz, o, err := getNextSize(m.R) + if err != nil { + return 0, err + } + + var n int64 + // Opportunistic optimization: if we can fit the whole thing in the m.R + // buffer, then just get a pointer to that, and pass it to w.Write, + // avoiding an allocation. 
+ if int(sz) <= m.R.BufferSize() { + var nn int + var buf []byte + buf, err = m.R.Next(int(sz)) + if err != nil { + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + return 0, err + } + nn, err = w.Write(buf) + n += int64(nn) + } else { + // Fall back to io.CopyN. + // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer) + n, err = io.CopyN(w, m.R, int64(sz)) + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + } + if err != nil { + return n, err + } else if n < int64(sz) { + return n, io.ErrShortWrite + } + + // for maps and slices, read elements + for x := uintptr(0); x < o; x++ { + var n2 int64 + n2, err = m.CopyNext(w) + if err != nil { + return n, err + } + n += n2 + } + return n, nil +} + +// ReadFull implements `io.ReadFull` +func (m *Reader) ReadFull(p []byte) (int, error) { + return m.R.ReadFull(p) +} + +// Reset resets the underlying reader. +func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) } + +// Buffered returns the number of bytes currently in the read buffer. +func (m *Reader) Buffered() int { return m.R.Buffered() } + +// BufferSize returns the capacity of the read buffer. +func (m *Reader) BufferSize() int { return m.R.BufferSize() } + +// NextType returns the next object type to be decoded. +func (m *Reader) NextType() (Type, error) { + p, err := m.R.Peek(1) + if err != nil { + return InvalidType, err + } + t := getType(p[0]) + if t == InvalidType { + return t, InvalidPrefixError(p[0]) + } + if t == ExtensionType { + v, err := m.peekExtensionType() + if err != nil { + return InvalidType, err + } + switch v { + case Complex64Extension: + return Complex64Type, nil + case Complex128Extension: + return Complex128Type, nil + case TimeExtension: + return TimeType, nil + } + } + return t, nil +} + +// IsNil returns whether or not +// the next byte is a null messagepack byte +func (m *Reader) IsNil() bool { + p, err := m.R.Peek(1) + return err == nil && p[0] == mnil +} + +// getNextSize returns the size of the next object on the wire. +// returns (obj size, obj elements, error) +// only maps and arrays have non-zero obj elements +// for maps and arrays, obj size does not include elements +// +// use uintptr b/c it's guaranteed to be large enough +// to hold whatever we can fit in memory. +func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) { + b, err := r.Peek(1) + if err != nil { + return 0, 0, err + } + lead := b[0] + spec := &sizes[lead] + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { + return uintptr(size), uintptr(mode), nil + } + b, err = r.Peek(int(size)) + if err != nil { + return 0, 0, err + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} + +// Skip skips over the next object, regardless of +// its type. If it is an array or map, the whole array +// or map will be skipped. 
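+//
+// A common decoding pattern (a sketch, error handling elided; the
+// "name" key is illustrative): skip the values of unrecognized map
+// keys:
+//
+//	sz, _ := m.ReadMapHeader()
+//	for i := uint32(0); i < sz; i++ {
+//		key, _ := m.ReadMapKey(nil)
+//		switch string(key) {
+//		case "name":
+//			// decode the value here
+//		default:
+//			m.Skip() // discard values for unknown keys
+//		}
+//	}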
+func (m *Reader) Skip() error { + var ( + v uintptr // bytes + o uintptr // objects + err error + p []byte + ) + + // we can use the faster + // method if we have enough + // buffered data + if m.R.Buffered() >= 5 { + p, err = m.R.Peek(5) + if err != nil { + return err + } + v, o, err = getSize(p) + if err != nil { + return err + } + } else { + v, o, err = getNextSize(m.R) + if err != nil { + return err + } + } + + // 'v' is always non-zero + // if err == nil + _, err = m.R.Skip(int(v)) + if err != nil { + return err + } + + // for maps and slices, skip elements + for x := uintptr(0); x < o; x++ { + err = m.Skip() + if err != nil { + return err + } + } + return nil +} + +// ReadMapHeader reads the next object +// as a map header and returns the size +// of the map and the number of bytes written. +// It will return a TypeError{} if the next +// object is not a map. +func (m *Reader) ReadMapHeader() (sz uint32, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mmap16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mmap32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKey reads either a 'str' or 'bin' field from +// the reader and returns the value as a []byte. It uses +// scratch for storage if it is large enough. +func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) { + out, err := m.ReadStringAsBytes(scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return m.ReadBytes(scratch) + } + return nil, err + } + return out, nil +} + +// MapKeyPtr returns a []byte pointing to the contents +// of a valid map key. The key cannot be empty, and it +// must be shorter than the total buffer size of the +// *Reader. Additionally, the returned slice is only +// valid until the next *Reader method call. Users +// should exercise extreme care when using this +// method; writing into the returned slice may +// corrupt future reads. +func (m *Reader) ReadMapKeyPtr() ([]byte, error) { + p, err := m.R.Peek(1) + if err != nil { + return nil, err + } + lead := p[0] + var read int + if isfixstr(lead) { + read = int(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + switch lead { + case mstr8, mbin8: + p, err = m.R.Next(2) + if err != nil { + return nil, err + } + read = int(p[1]) + case mstr16, mbin16: + p, err = m.R.Next(3) + if err != nil { + return nil, err + } + read = int(big.Uint16(p[1:])) + case mstr32, mbin32: + p, err = m.R.Next(5) + if err != nil { + return nil, err + } + read = int(big.Uint32(p[1:])) + default: + return nil, badPrefix(StrType, lead) + } +fill: + if read == 0 { + return nil, ErrShortBytes + } + return m.R.Next(read) +} + +// ReadArrayHeader reads the next object as an +// array header and returns the size of the array +// and the number of bytes read. 
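+//
+// Typical use (a sketch, error handling elided):
+//
+//	sz, _ := m.ReadArrayHeader()
+//	for i := uint32(0); i < sz; i++ {
+//		// decode one element per iteration
+//	}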
+func (m *Reader) ReadArrayHeader() (sz uint32, err error) { + var lead byte + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case marray16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + + case marray32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNil reads a 'nil' MessagePack byte from the reader +func (m *Reader) ReadNil() error { + p, err := m.R.Peek(1) + if err != nil { + return err + } + if p[0] != mnil { + return badPrefix(NilType, p[0]) + } + _, err = m.R.Skip(1) + return err +} + +// ReadFloat64 reads a float64 from the reader. +// (If the value on the wire is encoded as a float32, +// it will be up-cast to a float64.) +func (m *Reader) ReadFloat64() (f float64, err error) { + var p []byte + p, err = m.R.Peek(9) + if err != nil { + // we'll allow a coversion from float32 to float64, + // since we don't lose any precision + if err == io.EOF && len(p) > 0 && p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + return + } + if p[0] != mfloat64 { + // see above + if p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + err = badPrefix(Float64Type, p[0]) + return + } + f = math.Float64frombits(getMuint64(p)) + _, err = m.R.Skip(9) + return +} + +// ReadFloat32 reads a float32 from the reader +func (m *Reader) ReadFloat32() (f float32, err error) { + var p []byte + p, err = m.R.Peek(5) + if err != nil { + return + } + if p[0] != mfloat32 { + err = badPrefix(Float32Type, p[0]) + return + } + f = math.Float32frombits(getMuint32(p)) + _, err = m.R.Skip(5) + return +} + +// ReadBool reads a bool from the reader +func (m *Reader) ReadBool() (b bool, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mtrue: + b = true + case mfalse: + default: + err = badPrefix(BoolType, p[0]) + return + } + _, err = m.R.Skip(1) + return +} + +// ReadInt64 reads an int64 from the reader +func (m *Reader) ReadInt64() (i int64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixint(lead) { + i = int64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } else if isnfixint(lead) { + i = int64(rnfixint(lead)) + _, err = m.R.Skip(1) + return + } + + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + i = getMint64(p) + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32 reads an int32 from the reader +func (m *Reader) ReadInt32() (i int32, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt32 || in < math.MinInt32 { + err = IntOverflow{Value: in, FailedBitsize: 32} + return + } + i = int32(in) + return +} + +// ReadInt16 reads an int16 from the reader +func (m *Reader) ReadInt16() (i int16, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt16 || in < math.MinInt16 { + err = IntOverflow{Value: in, FailedBitsize: 
16} + return + } + i = int16(in) + return +} + +// ReadInt8 reads an int8 from the reader +func (m *Reader) ReadInt8() (i int8, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt8 || in < math.MinInt8 { + err = IntOverflow{Value: in, FailedBitsize: 8} + return + } + i = int8(in) + return +} + +// ReadInt reads an int from the reader +func (m *Reader) ReadInt() (i int, err error) { + if smallint { + var in int32 + in, err = m.ReadInt32() + i = int(in) + return + } + var in int64 + in, err = m.ReadInt64() + i = int(in) + return +} + +// ReadUint64 reads a uint64 from the reader +func (m *Reader) ReadUint64() (u uint64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + u = uint64(getMuint8(p)) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + u = uint64(getMuint16(p)) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + u = uint64(getMuint32(p)) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u = getMuint64(p) + return + + default: + err = badPrefix(UintType, lead) + return + + } +} + +// ReadUint32 reads a uint32 from the reader +func (m *Reader) ReadUint32() (u uint32, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint32 { + err = UintOverflow{Value: in, FailedBitsize: 32} + return + } + u = uint32(in) + return +} + +// ReadUint16 reads a uint16 from the reader +func (m *Reader) ReadUint16() (u uint16, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint16 { + err = UintOverflow{Value: in, FailedBitsize: 16} + return + } + u = uint16(in) + return +} + +// ReadUint8 reads a uint8 from the reader +func (m *Reader) ReadUint8() (u uint8, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + u = uint8(in) + return +} + +// ReadUint reads a uint from the reader +func (m *Reader) ReadUint() (u uint, err error) { + if smallint { + var un uint32 + un, err = m.ReadUint32() + u = uint(un) + return + } + var un uint64 + un, err = m.ReadUint64() + u = uint(un) + return +} + +// ReadByte is analogous to ReadUint8. +// +// NOTE: this is *not* an implementation +// of io.ByteReader. +func (m *Reader) ReadByte() (b byte, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + b = byte(in) + return +} + +// ReadBytes reads a MessagePack 'bin' object +// from the reader and returns its value. It may +// use 'scratch' for storage if it is non-nil. 
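+//
+// A sketch of reusing one scratch buffer across many reads to limit
+// allocations, assuming 'dec' is a *Reader; the result may alias scratch:
+//
+//	scratch := make([]byte, 0, 256)
+//	blob, err := dec.ReadBytes(scratch)
+//	// blob points into scratch when the value fits in cap(scratch)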
+func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead = p[0] + var read int64 + switch lead { + case mbin8: + read = int64(p[1]) + m.R.Skip(2) + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(BinType, lead) + return + } + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadBytesHeader reads the size header +// of a MessagePack 'bin' object. The user +// is responsible for dealing with the next +// 'sz' bytes from the reader in an application-specific +// way. +func (m *Reader) ReadBytesHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mbin8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = uint32(big.Uint32(p[1:])) + return + default: + err = badPrefix(BinType, p[0]) + return + } +} + +// ReadExactBytes reads a MessagePack 'bin'-encoded +// object off of the wire into the provided slice. An +// ArrayError will be returned if the object is not +// exactly the length of the input slice. +func (m *Reader) ReadExactBytes(into []byte) error { + p, err := m.R.Peek(2) + if err != nil { + return err + } + lead := p[0] + var read int64 // bytes to read + var skip int // prefix size to skip + switch lead { + case mbin8: + read = int64(p[1]) + skip = 2 + case mbin16: + p, err = m.R.Peek(3) + if err != nil { + return err + } + read = int64(big.Uint16(p[1:])) + skip = 3 + case mbin32: + p, err = m.R.Peek(5) + if err != nil { + return err + } + read = int64(big.Uint32(p[1:])) + skip = 5 + default: + return badPrefix(BinType, lead) + } + if read != int64(len(into)) { + return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)} + } + m.R.Skip(skip) + _, err = m.R.ReadFull(into) + return err +} + +// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string +// and returns its value as bytes. It may use 'scratch' for storage +// if it is non-nil. +func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + var read int64 + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadStringHeader reads a string header +// off of the wire. The user is then responsible +// for dealing with the next 'sz' bytes from +// the reader in an application-specific manner. 
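+//
+// A sketch of streaming a large string straight to another writer rather
+// than materializing it, assuming 'dec' is a *Reader and 'dst' an io.Writer:
+//
+//	sz, err := dec.ReadStringHeader()
+//	if err != nil {
+//		return err
+//	}
+//	_, err = io.CopyN(dst, dec.R, int64(sz))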
+func (m *Reader) ReadStringHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead := p[0] + if isfixstr(lead) { + sz = uint32(rfixstr(lead)) + m.R.Skip(1) + return + } + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(StrType, lead) + return + } +} + +// ReadString reads a utf-8 string from the reader +func (m *Reader) ReadString() (s string, err error) { + var p []byte + var lead byte + var read int64 + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if read == 0 { + s, err = "", nil + return + } + // reading into the memory + // that will become the string + // itself has vastly superior + // worst-case performance, because + // the reader buffer doesn't have + // to be large enough to hold the string. + // the idea here is to make it more + // difficult for someone malicious + // to cause the system to run out of + // memory by sending very large strings. + // + // NOTE: this works because the argument + // passed to (*fwd.Reader).ReadFull escapes + // to the heap; its argument may, in turn, + // be passed to the underlying reader, and + // thus escape analysis *must* conclude that + // 'out' escapes. + out := make([]byte, read) + _, err = m.R.ReadFull(out) + if err != nil { + return + } + s = UnsafeString(out) + return +} + +// ReadComplex64 reads a complex64 from the reader +func (m *Reader) ReadComplex64() (f complex64, err error) { + var p []byte + p, err = m.R.Peek(10) + if err != nil { + return + } + if p[0] != mfixext8 { + err = badPrefix(Complex64Type, p[0]) + return + } + if int8(p[1]) != Complex64Extension { + err = errExt(int8(p[1]), Complex64Extension) + return + } + f = complex(math.Float32frombits(big.Uint32(p[2:])), + math.Float32frombits(big.Uint32(p[6:]))) + _, err = m.R.Skip(10) + return +} + +// ReadComplex128 reads a complex128 from the reader +func (m *Reader) ReadComplex128() (f complex128, err error) { + var p []byte + p, err = m.R.Peek(18) + if err != nil { + return + } + if p[0] != mfixext16 { + err = badPrefix(Complex128Type, p[0]) + return + } + if int8(p[1]) != Complex128Extension { + err = errExt(int8(p[1]), Complex128Extension) + return + } + f = complex(math.Float64frombits(big.Uint64(p[2:])), + math.Float64frombits(big.Uint64(p[10:]))) + _, err = m.R.Skip(18) + return +} + +// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}. +// (You must pass a non-nil map into the function.) 
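+//
+// A sketch of reusing one map across decodes; ReadMapStrIntf clears the
+// map it is given before filling it ('dec' is an assumed *Reader):
+//
+//	m := make(map[string]interface{})
+//	if err := dec.ReadMapStrIntf(m); err != nil {
+//		return err
+//	}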
+func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) { + var sz uint32 + sz, err = m.ReadMapHeader() + if err != nil { + return + } + for key := range mp { + delete(mp, key) + } + for i := uint32(0); i < sz; i++ { + var key string + var val interface{} + key, err = m.ReadString() + if err != nil { + return + } + val, err = m.ReadIntf() + if err != nil { + return + } + mp[key] = val + } + return +} + +// ReadTime reads a time.Time object from the reader. +// The returned time's location will be set to time.Local. +func (m *Reader) ReadTime() (t time.Time, err error) { + var p []byte + p, err = m.R.Peek(15) + if err != nil { + return + } + if p[0] != mext8 || p[1] != 12 { + err = badPrefix(TimeType, p[0]) + return + } + if int8(p[2]) != TimeExtension { + err = errExt(int8(p[2]), TimeExtension) + return + } + sec, nsec := getUnix(p[3:]) + t = time.Unix(sec, int64(nsec)).Local() + _, err = m.R.Skip(15) + return +} + +// ReadIntf reads out the next object as a raw interface{}. +// Arrays are decoded as []interface{}, and maps are decoded +// as map[string]interface{}. Integers are decoded as int64 +// and unsigned integers are decoded as uint64. +func (m *Reader) ReadIntf() (i interface{}, err error) { + var t Type + t, err = m.NextType() + if err != nil { + return + } + switch t { + case BoolType: + i, err = m.ReadBool() + return + + case IntType: + i, err = m.ReadInt64() + return + + case UintType: + i, err = m.ReadUint64() + return + + case BinType: + i, err = m.ReadBytes(nil) + return + + case StrType: + i, err = m.ReadString() + return + + case Complex64Type: + i, err = m.ReadComplex64() + return + + case Complex128Type: + i, err = m.ReadComplex128() + return + + case TimeType: + i, err = m.ReadTime() + return + + case ExtensionType: + var t int8 + t, err = m.peekExtensionType() + if err != nil { + return + } + f, ok := extensionReg[t] + if ok { + e := f() + err = m.ReadExtension(e) + i = e + return + } + var e RawExtension + e.Type = t + err = m.ReadExtension(&e) + i = &e + return + + case MapType: + mp := make(map[string]interface{}) + err = m.ReadMapStrIntf(mp) + i = mp + return + + case NilType: + err = m.ReadNil() + i = nil + return + + case Float32Type: + i, err = m.ReadFloat32() + return + + case Float64Type: + i, err = m.ReadFloat64() + return + + case ArrayType: + var sz uint32 + sz, err = m.ReadArrayHeader() + + if err != nil { + return + } + out := make([]interface{}, int(sz)) + for j := range out { + out[j], err = m.ReadIntf() + if err != nil { + return + } + } + i = out + return + + default: + return nil, fatal // unreachable + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go new file mode 100644 index 000000000000..78e466fc1f15 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go @@ -0,0 +1,1089 @@ +package msgp + +import ( + "bytes" + "encoding/binary" + "math" + "time" +) + +var big = binary.BigEndian + +// NextType returns the type of the next +// object in the slice. If the length +// of the input is zero, it returns +// InvalidType. 
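+//
+// A sketch of dispatching on the next encoded type without consuming it,
+// assuming 'buf' holds msgp-encoded data:
+//
+//	switch msgp.NextType(buf) {
+//	case msgp.NilType:
+//		buf, _ = msgp.ReadNilBytes(buf)
+//	case msgp.StrType:
+//		var s string
+//		s, buf, _ = msgp.ReadStringBytes(buf)
+//	}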
+func NextType(b []byte) Type { + if len(b) == 0 { + return InvalidType + } + spec := sizes[b[0]] + t := spec.typ + if t == ExtensionType && len(b) > int(spec.size) { + var tp int8 + if spec.extra == constsize { + tp = int8(b[1]) + } else { + tp = int8(b[spec.size-1]) + } + switch tp { + case TimeExtension: + return TimeType + case Complex128Extension: + return Complex128Type + case Complex64Extension: + return Complex64Type + default: + return ExtensionType + } + } + return t +} + +// IsNil returns true if len(b)>0 and +// the leading byte is a 'nil' MessagePack +// byte; false otherwise +func IsNil(b []byte) bool { + if len(b) != 0 && b[0] == mnil { + return true + } + return false +} + +// Raw is raw MessagePack. +// Raw allows you to read and write +// data without interpreting its contents. +type Raw []byte + +// MarshalMsg implements msgp.Marshaler. +// It appends the raw contents of 'raw' +// to the provided byte slice. If 'raw' +// is 0 bytes, 'nil' will be appended instead. +func (r Raw) MarshalMsg(b []byte) ([]byte, error) { + i := len(r) + if i == 0 { + return AppendNil(b), nil + } + o, l := ensure(b, i) + copy(o[l:], []byte(r)) + return o, nil +} + +// UnmarshalMsg implements msgp.Unmarshaler. +// It sets the contents of *Raw to be the next +// object in the provided byte slice. +func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) { + l := len(b) + out, err := Skip(b) + if err != nil { + return b, err + } + rlen := l - len(out) + if cap(*r) < rlen { + *r = make(Raw, rlen) + } else { + *r = (*r)[0:rlen] + } + copy(*r, b[:rlen]) + return out, nil +} + +// EncodeMsg implements msgp.Encodable. +// It writes the raw bytes to the writer. +// If r is empty, it writes 'nil' instead. +func (r Raw) EncodeMsg(w *Writer) error { + if len(r) == 0 { + return w.WriteNil() + } + _, err := w.Write([]byte(r)) + return err +} + +// DecodeMsg implements msgp.Decodable. +// It sets the value of *Raw to be the +// next object on the wire. +func (r *Raw) DecodeMsg(f *Reader) error { + *r = (*r)[:0] + return appendNext(f, (*[]byte)(r)) +} + +// Msgsize implements msgp.Sizer +func (r Raw) Msgsize() int { + l := len(r) + if l == 0 { + return 1 // for 'nil' + } + return l +} + +func appendNext(f *Reader, d *[]byte) error { + amt, o, err := getNextSize(f.R) + if err != nil { + return err + } + var i int + *d, i = ensure(*d, int(amt)) + _, err = f.R.ReadFull((*d)[i:]) + if err != nil { + return err + } + for o > 0 { + err = appendNext(f, d) + if err != nil { + return err + } + o-- + } + return nil +} + +// MarshalJSON implements json.Marshaler +func (r *Raw) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + _, err := UnmarshalAsJSON(&buf, []byte(*r)) + return buf.Bytes(), err +} + +// ReadMapHeaderBytes reads a map header size +// from 'b' and returns the remaining bytes. 
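+//
+// A sketch of walking a map zero-copy, assuming 'b' holds one complete
+// encoded map:
+//
+//	sz, rest, err := msgp.ReadMapHeaderBytes(b)
+//	for i := uint32(0); err == nil && i < sz; i++ {
+//		var key []byte
+//		key, rest, err = msgp.ReadMapKeyZC(rest)
+//		if err == nil {
+//			rest, err = msgp.Skip(rest) // drop the value
+//		}
+//		_ = key
+//	}
+//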
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a map) +func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + o = b[1:] + return + } + + switch lead { + case mmap16: + if l < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case mmap32: + if l < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKeyZC attempts to read a map key +// from 'b' and returns the key bytes and the remaining bytes +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a str or bin) +func ReadMapKeyZC(b []byte) ([]byte, []byte, error) { + o, b, err := ReadStringZC(b) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return ReadBytesZC(b) + } + return nil, b, err + } + return o, b, nil +} + +// ReadArrayHeaderBytes attempts to read +// the array header size off of 'b' and return +// the size and remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not an array) +func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + lead := b[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + o = b[1:] + return + } + + switch lead { + case marray16: + if len(b) < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case marray32: + if len(b) < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNilBytes tries to read a "nil" byte +// off of 'b' and return the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'nil') +// - InvalidPrefixError +func ReadNilBytes(b []byte) ([]byte, error) { + if len(b) < 1 { + return nil, ErrShortBytes + } + if b[0] != mnil { + return b, badPrefix(NilType, b[0]) + } + return b[1:], nil +} + +// ReadFloat64Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a float64) +func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) { + if len(b) < 9 { + if len(b) >= 5 && b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = ErrShortBytes + return + } + + if b[0] != mfloat64 { + if b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = badPrefix(Float64Type, b[0]) + return + } + + f = math.Float64frombits(getMuint64(b)) + o = b[9:] + return +} + +// ReadFloat32Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a float32) +func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) { + if len(b) < 5 { + err = ErrShortBytes + return + } + + if b[0] != mfloat32 { + err = TypeError{Method: Float32Type, Encoded: getType(b[0])} + return + } + + f = math.Float32frombits(getMuint32(b)) + o = b[5:] + return +} + +// ReadBoolBytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a bool) +func ReadBoolBytes(b []byte) (bool, []byte, error) { + if len(b) < 1 { + return false, b, ErrShortBytes + } + switch b[0] { + case mtrue: + return true, b[1:], nil + case mfalse: + return false, b[1:], nil + default: + return false, b, badPrefix(BoolType, b[0]) + } +} + +// ReadInt64Bytes tries to read an int64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError (not a int) +func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + i = int64(rfixint(lead)) + o = b[1:] + return + } + if isnfixint(lead) { + i = int64(rnfixint(lead)) + o = b[1:] + return + } + + switch lead { + case mint8: + if l < 2 { + err = ErrShortBytes + return + } + i = int64(getMint8(b)) + o = b[2:] + return + + case mint16: + if l < 3 { + err = ErrShortBytes + return + } + i = int64(getMint16(b)) + o = b[3:] + return + + case mint32: + if l < 5 { + err = ErrShortBytes + return + } + i = int64(getMint32(b)) + o = b[5:] + return + + case mint64: + if l < 9 { + err = ErrShortBytes + return + } + i = getMint64(b) + o = b[9:] + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32Bytes tries to read an int32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int32) +func ReadInt32Bytes(b []byte) (int32, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt32 || i < math.MinInt32 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 32} + } + return int32(i), o, err +} + +// ReadInt16Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int16) +func ReadInt16Bytes(b []byte) (int16, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt16 || i < math.MinInt16 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 16} + } + return int16(i), o, err +} + +// ReadInt8Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int8) +func ReadInt8Bytes(b []byte) (int8, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt8 || i < math.MinInt8 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 8} + } + return int8(i), o, err +} + +// ReadIntBytes tries to read an int +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int; 32-bit platforms only) +func ReadIntBytes(b []byte) (int, []byte, error) { + if smallint { + i, b, err := ReadInt32Bytes(b) + return int(i), b, err + } + i, b, err := ReadInt64Bytes(b) + return int(i), b, err +} + +// ReadUint64Bytes tries to read a uint64 +// from 'b' and return the value and the remaining bytes. 
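+//
+// A sketch, assuming 'b' begins with any msgp-encoded unsigned integer:
+//
+//	u, rest, err := msgp.ReadUint64Bytes(b)
+//	_ = rest // continue decoding from rest
+//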
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + o = b[1:] + return + } + + switch lead { + case muint8: + if l < 2 { + err = ErrShortBytes + return + } + u = uint64(getMuint8(b)) + o = b[2:] + return + + case muint16: + if l < 3 { + err = ErrShortBytes + return + } + u = uint64(getMuint16(b)) + o = b[3:] + return + + case muint32: + if l < 5 { + err = ErrShortBytes + return + } + u = uint64(getMuint32(b)) + o = b[5:] + return + + case muint64: + if l < 9 { + err = ErrShortBytes + return + } + u = getMuint64(b) + o = b[9:] + return + + default: + err = badPrefix(UintType, lead) + return + } +} + +// ReadUint32Bytes tries to read a uint32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint32) +func ReadUint32Bytes(b []byte) (uint32, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint32 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 32} + } + return uint32(v), o, err +} + +// ReadUint16Bytes tries to read a uint16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint16) +func ReadUint16Bytes(b []byte) (uint16, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint16 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 16} + } + return uint16(v), o, err +} + +// ReadUint8Bytes tries to read a uint8 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint8) +func ReadUint8Bytes(b []byte) (uint8, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint8 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 8} + } + return uint8(v), o, err +} + +// ReadUintBytes tries to read a uint +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint; 32-bit platforms only) +func ReadUintBytes(b []byte) (uint, []byte, error) { + if smallint { + u, b, err := ReadUint32Bytes(b) + return uint(u), b, err + } + u, b, err := ReadUint64Bytes(b) + return uint(u), b, err +} + +// ReadByteBytes is analogous to ReadUint8Bytes +func ReadByteBytes(b []byte) (byte, []byte, error) { + return ReadUint8Bytes(b) +} + +// ReadBytesBytes reads a 'bin' object +// from 'b' and returns its vaue and +// the remaining bytes in 'b'. 
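+//
+// A sketch, assuming 'b' begins with an encoded 'bin' object; pass a
+// scratch slice instead of nil to reuse its storage:
+//
+//	v, rest, err := msgp.ReadBytesBytes(b, nil)
+//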
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'bin' object) +func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, scratch, false) +} + +func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + switch lead { + case mbin8: + if l < 2 { + err = ErrShortBytes + return + } + + read = int(b[1]) + b = b[2:] + + case mbin16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mbin32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = badPrefix(BinType, lead) + return + } + + if len(b) < read { + err = ErrShortBytes + return + } + + // zero-copy + if zc { + v = b[0:read] + o = b[read:] + return + } + + if cap(scratch) >= read { + v = scratch[0:read] + } else { + v = make([]byte, read) + } + + o = b[copy(v, b):] + return +} + +// ReadBytesZC extracts the messagepack-encoded +// binary field without copying. The returned []byte +// points to the same memory as the input slice. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'bin') +func ReadBytesZC(b []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, nil, true) +} + +func ReadExactBytes(b []byte, into []byte) (o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + var read uint32 + var skip int + switch lead { + case mbin8: + if l < 2 { + err = ErrShortBytes + return + } + + read = uint32(b[1]) + skip = 2 + + case mbin16: + if l < 3 { + err = ErrShortBytes + return + } + read = uint32(big.Uint16(b[1:])) + skip = 3 + + case mbin32: + if l < 5 { + err = ErrShortBytes + return + } + read = uint32(big.Uint32(b[1:])) + skip = 5 + + default: + err = badPrefix(BinType, lead) + return + } + + if read != uint32(len(into)) { + err = ArrayError{Wanted: uint32(len(into)), Got: read} + return + } + + o = b[skip+copy(into, b[skip:]):] + return +} + +// ReadStringZC reads a messagepack string field +// without copying. The returned []byte points +// to the same memory as the input slice. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'str') +func ReadStringZC(b []byte) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + b = b[1:] + } else { + switch lead { + case mstr8: + if l < 2 { + err = ErrShortBytes + return + } + read = int(b[1]) + b = b[2:] + + case mstr16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mstr32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = TypeError{Method: StrType, Encoded: getType(lead)} + return + } + } + + if len(b) < read { + err = ErrShortBytes + return + } + + v = b[0:read] + o = b[read:] + return +} + +// ReadStringBytes reads a 'str' object +// from 'b' and returns its value and the +// remaining bytes in 'b'. 
+// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError +func ReadStringBytes(b []byte) (string, []byte, error) { + v, o, err := ReadStringZC(b) + return string(v), o, err +} + +// ReadStringAsBytes reads a 'str' object +// into a slice of bytes. 'v' is the value of +// the 'str' object, which may reside in memory +// pointed to by 'scratch.' 'o' is the remaining bytes +// in 'b.'' +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError (unknown type marker) +func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + var tmp []byte + tmp, o, err = ReadStringZC(b) + v = append(scratch[:0], tmp...) + return +} + +// ReadComplex128Bytes reads a complex128 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex128) +// - InvalidPrefixError +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex128) +func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) { + if len(b) < 18 { + err = ErrShortBytes + return + } + if b[0] != mfixext16 { + err = badPrefix(Complex128Type, b[0]) + return + } + if int8(b[1]) != Complex128Extension { + err = errExt(int8(b[1]), Complex128Extension) + return + } + c = complex(math.Float64frombits(big.Uint64(b[2:])), + math.Float64frombits(big.Uint64(b[10:]))) + o = b[18:] + return +} + +// ReadComplex64Bytes reads a complex64 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex64) +func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) { + if len(b) < 10 { + err = ErrShortBytes + return + } + if b[0] != mfixext8 { + err = badPrefix(Complex64Type, b[0]) + return + } + if b[1] != Complex64Extension { + err = errExt(int8(b[1]), Complex64Extension) + return + } + c = complex(math.Float32frombits(big.Uint32(b[2:])), + math.Float32frombits(big.Uint32(b[6:]))) + o = b[10:] + return +} + +// ReadTimeBytes reads a time.Time +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a time.Time) +func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { + if len(b) < 15 { + err = ErrShortBytes + return + } + if b[0] != mext8 || b[1] != 12 { + err = badPrefix(TimeType, b[0]) + return + } + if int8(b[2]) != TimeExtension { + err = errExt(int8(b[2]), TimeExtension) + return + } + sec, nsec := getUnix(b[3:]) + t = time.Unix(sec, int64(nsec)).Local() + o = b[15:] + return +} + +// ReadMapStrIntfBytes reads a map[string]interface{} +// out of 'b' and returns the map and remaining bytes. +// If 'old' is non-nil, the values will be read into that map. 
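+//
+// A sketch of decoding a whole map in one call, assuming 'buf' holds an
+// encoded map; pass a previous result as 'old' to recycle its storage:
+//
+//	v, rest, err := msgp.ReadMapStrIntfBytes(buf, nil)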
+func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { + var sz uint32 + o = b + sz, o, err = ReadMapHeaderBytes(o) + + if err != nil { + return + } + + if old != nil { + for key := range old { + delete(old, key) + } + v = old + } else { + v = make(map[string]interface{}, int(sz)) + } + + for z := uint32(0); z < sz; z++ { + if len(o) < 1 { + err = ErrShortBytes + return + } + var key []byte + key, o, err = ReadMapKeyZC(o) + if err != nil { + return + } + var val interface{} + val, o, err = ReadIntfBytes(o) + if err != nil { + return + } + v[string(key)] = val + } + return +} + +// ReadIntfBytes attempts to read +// the next object out of 'b' as a raw interface{} and +// return the remaining bytes. +func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { + if len(b) < 1 { + err = ErrShortBytes + return + } + + k := NextType(b) + + switch k { + case MapType: + i, o, err = ReadMapStrIntfBytes(b, nil) + return + + case ArrayType: + var sz uint32 + sz, o, err = ReadArrayHeaderBytes(b) + if err != nil { + return + } + j := make([]interface{}, int(sz)) + i = j + for d := range j { + j[d], o, err = ReadIntfBytes(o) + if err != nil { + return + } + } + return + + case Float32Type: + i, o, err = ReadFloat32Bytes(b) + return + + case Float64Type: + i, o, err = ReadFloat64Bytes(b) + return + + case IntType: + i, o, err = ReadInt64Bytes(b) + return + + case UintType: + i, o, err = ReadUint64Bytes(b) + return + + case BoolType: + i, o, err = ReadBoolBytes(b) + return + + case TimeType: + i, o, err = ReadTimeBytes(b) + return + + case Complex64Type: + i, o, err = ReadComplex64Bytes(b) + return + + case Complex128Type: + i, o, err = ReadComplex128Bytes(b) + return + + case ExtensionType: + var t int8 + t, err = peekExtension(b) + if err != nil { + return + } + // use a user-defined extension, + // if it's been registered + f, ok := extensionReg[t] + if ok { + e := f() + o, err = ReadExtensionBytes(b, e) + i = e + return + } + // last resort is a raw extension + e := RawExtension{} + e.Type = int8(t) + o, err = ReadExtensionBytes(b, &e) + i = &e + return + + case NilType: + o, err = ReadNilBytes(b) + return + + case BinType: + i, o, err = ReadBytesBytes(b, nil) + return + + case StrType: + i, o, err = ReadStringBytes(b) + return + + default: + err = InvalidPrefixError(b[0]) + return + } +} + +// Skip skips the next object in 'b' and +// returns the remaining bytes. If the object +// is a map or array, all of its elements +// will be skipped. 
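+//
+// For example, dropping exactly one object from a buffer:
+//
+//	rest, err := msgp.Skip(buf) // rest starts at the following object
+//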
+// Possible Errors: +// - ErrShortBytes (not enough bytes in b) +// - InvalidPrefixError (bad encoding) +func Skip(b []byte) ([]byte, error) { + sz, asz, err := getSize(b) + if err != nil { + return b, err + } + if uintptr(len(b)) < sz { + return b, ErrShortBytes + } + b = b[sz:] + for asz > 0 { + b, err = Skip(b) + if err != nil { + return b, err + } + asz-- + } + return b, nil +} + +// returns (skip N bytes, skip M objects, error) +func getSize(b []byte) (uintptr, uintptr, error) { + l := len(b) + if l == 0 { + return 0, 0, ErrShortBytes + } + lead := b[0] + spec := &sizes[lead] // get type information + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { // fixed composites + return uintptr(size), uintptr(mode), nil + } + if l < int(size) { + return 0, 0, ErrShortBytes + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go new file mode 100644 index 000000000000..ce2f8b16ff26 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/size.go @@ -0,0 +1,38 @@ +package msgp + +// The sizes provided +// are the worst-case +// encoded sizes for +// each type. For variable- +// length types ([]byte, string), +// the total encoded size is +// the prefix size plus the +// length of the object. +const ( + Int64Size = 9 + IntSize = Int64Size + UintSize = Int64Size + Int8Size = 2 + Int16Size = 3 + Int32Size = 5 + Uint8Size = 2 + ByteSize = Uint8Size + Uint16Size = 3 + Uint32Size = 5 + Uint64Size = Int64Size + Float64Size = 9 + Float32Size = 5 + Complex64Size = 10 + Complex128Size = 18 + + TimeSize = 15 + BoolSize = 1 + NilSize = 1 + + MapHeaderSize = 5 + ArrayHeaderSize = 5 + + BytesPrefixSize = 5 + StringPrefixSize = 5 + ExtensionPrefixSize = 6 +) diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go new file mode 100644 index 000000000000..0cb972e3be3b --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go @@ -0,0 +1,40 @@ +// +build !appengine + +package msgp + +import ( + "reflect" + "unsafe" +) + +// NOTE: +// all of the definition in this file +// should be repeated in appengine.go, +// but without using unsafe + +const ( + // spec says int and uint are always + // the same size, but that int/uint + // size may not be machine word size + smallint = unsafe.Sizeof(int(0)) == 4 +) + +// UnsafeString returns the byte slice as a volatile string +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. +func UnsafeString(b []byte) string { + return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: uintptr(unsafe.Pointer(&b[0])), Len: len(b)})) +} + +// UnsafeBytes returns the string as a byte slice +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. 
+func UnsafeBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: len(s), + Cap: len(s), + Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data, + })) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go new file mode 100644 index 000000000000..0245c1bd79c6 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write.go @@ -0,0 +1,845 @@ +package msgp + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "sync" + "time" +) + +// Sizer is an interface implemented +// by types that can estimate their +// size when MessagePack encoded. +// This interface is optional, but +// encoding/marshaling implementations +// may use this as a way to pre-allocate +// memory for serialization. +type Sizer interface { + Msgsize() int +} + +var ( + // Nowhere is an io.Writer to nowhere + Nowhere io.Writer = nwhere{} + + btsType = reflect.TypeOf(([]byte)(nil)) + writerPool = sync.Pool{ + New: func() interface{} { + return &Writer{buf: make([]byte, 2048)} + }, + } +) + +func popWriter(w io.Writer) *Writer { + wr := writerPool.Get().(*Writer) + wr.Reset(w) + return wr +} + +func pushWriter(wr *Writer) { + wr.w = nil + wr.wloc = 0 + writerPool.Put(wr) +} + +// freeW frees a writer for use +// by other processes. It is not necessary +// to call freeW on a writer. However, maintaining +// a reference to a *Writer after calling freeW on +// it will cause undefined behavior. +func freeW(w *Writer) { pushWriter(w) } + +// Require ensures that cap(old)-len(old) >= extra. +func Require(old []byte, extra int) []byte { + l := len(old) + c := cap(old) + r := l + extra + if c >= r { + return old + } else if l == 0 { + return make([]byte, 0, extra) + } + // the new size is the greater + // of double the old capacity + // and the sum of the old length + // and the number of new bytes + // necessary. + c <<= 1 + if c < r { + c = r + } + n := make([]byte, l, c) + copy(n, old) + return n +} + +// nowhere writer +type nwhere struct{} + +func (n nwhere) Write(p []byte) (int, error) { return len(p), nil } + +// Marshaler is the interface implemented +// by types that know how to marshal themselves +// as MessagePack. MarshalMsg appends the marshalled +// form of the object to the provided +// byte slice, returning the extended +// slice and any errors encountered. +type Marshaler interface { + MarshalMsg([]byte) ([]byte, error) +} + +// Encodable is the interface implemented +// by types that know how to write themselves +// as MessagePack using a *msgp.Writer. +type Encodable interface { + EncodeMsg(*Writer) error +} + +// Writer is a buffered writer +// that can be used to write +// MessagePack objects to an io.Writer. +// You must call *Writer.Flush() in order +// to flush all of the buffered data +// to the underlying writer. +type Writer struct { + w io.Writer + buf []byte + wloc int +} + +// NewWriter returns a new *Writer. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return popWriter(w) +} + +// NewWriterSize returns a writer with a custom buffer size. +func NewWriterSize(w io.Writer, sz int) *Writer { + // we must be able to require() 18 + // contiguous bytes, so that is the + // practical minimum buffer size + if sz < 18 { + sz = 18 + } + + return &Writer{ + w: w, + buf: make([]byte, sz), + } +} + +// Encode encodes an Encodable to an io.Writer. 
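+//
+// A minimal sketch, assuming 'obj' implements msgp.Encodable (typically
+// via generated code):
+//
+//	var buf bytes.Buffer
+//	if err := msgp.Encode(&buf, obj); err != nil {
+//		return err
+//	}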
+func Encode(w io.Writer, e Encodable) error { + wr := NewWriter(w) + err := e.EncodeMsg(wr) + if err == nil { + err = wr.Flush() + } + freeW(wr) + return err +} + +func (mw *Writer) flush() error { + if mw.wloc == 0 { + return nil + } + n, err := mw.w.Write(mw.buf[:mw.wloc]) + if err != nil { + if n > 0 { + mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc]) + } + return err + } + mw.wloc = 0 + return nil +} + +// Flush flushes all of the buffered +// data to the underlying writer. +func (mw *Writer) Flush() error { return mw.flush() } + +// Buffered returns the number bytes in the write buffer +func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) bufsize() int { return len(mw.buf) } + +// NOTE: this should only be called with +// a number that is guaranteed to be less than +// len(mw.buf). typically, it is called with a constant. +// +// NOTE: this is a hot code path +func (mw *Writer) require(n int) (int, error) { + c := len(mw.buf) + wl := mw.wloc + if c-wl < n { + if err := mw.flush(); err != nil { + return 0, err + } + wl = mw.wloc + } + mw.wloc += n + return wl, nil +} + +func (mw *Writer) Append(b ...byte) error { + if mw.avail() < len(b) { + err := mw.flush() + if err != nil { + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], b) + return nil +} + +// push one byte onto the buffer +// +// NOTE: this is a hot code path +func (mw *Writer) push(b byte) error { + if mw.wloc == len(mw.buf) { + if err := mw.flush(); err != nil { + return err + } + } + mw.buf[mw.wloc] = b + mw.wloc++ + return nil +} + +func (mw *Writer) prefix8(b byte, u uint8) error { + const need = 2 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu8(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix16(b byte, u uint16) error { + const need = 3 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu16(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix32(b byte, u uint32) error { + const need = 5 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu32(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix64(b byte, u uint64) error { + const need = 9 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu64(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +// Write implements io.Writer, and writes +// data directly to the buffer. 
+func (mw *Writer) Write(p []byte) (int, error) { + l := len(p) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return 0, err + } + if l > len(mw.buf) { + return mw.w.Write(p) + } + } + mw.wloc += copy(mw.buf[mw.wloc:], p) + return l, nil +} + +// implements io.WriteString +func (mw *Writer) writeString(s string) error { + l := len(s) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return err + } + if l > len(mw.buf) { + _, err := io.WriteString(mw.w, s) + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], s) + return nil +} + +// Reset changes the underlying writer used by the Writer +func (mw *Writer) Reset(w io.Writer) { + mw.buf = mw.buf[:cap(mw.buf)] + mw.w = w + mw.wloc = 0 +} + +// WriteMapHeader writes a map header of the given +// size to the writer +func (mw *Writer) WriteMapHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixmap(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(mmap16, uint16(sz)) + default: + return mw.prefix32(mmap32, sz) + } +} + +// WriteArrayHeader writes an array header of the +// given size to the writer +func (mw *Writer) WriteArrayHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixarray(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(marray16, uint16(sz)) + default: + return mw.prefix32(marray32, sz) + } +} + +// WriteNil writes a nil byte to the buffer +func (mw *Writer) WriteNil() error { + return mw.push(mnil) +} + +// WriteFloat64 writes a float64 to the writer +func (mw *Writer) WriteFloat64(f float64) error { + return mw.prefix64(mfloat64, math.Float64bits(f)) +} + +// WriteFloat32 writes a float32 to the writer +func (mw *Writer) WriteFloat32(f float32) error { + return mw.prefix32(mfloat32, math.Float32bits(f)) +} + +// WriteInt64 writes an int64 to the writer +func (mw *Writer) WriteInt64(i int64) error { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return mw.push(wfixint(uint8(i))) + case i <= math.MaxInt16: + return mw.prefix16(mint16, uint16(i)) + case i <= math.MaxInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } + } + switch { + case i >= -32: + return mw.push(wnfixint(int8(i))) + case i >= math.MinInt8: + return mw.prefix8(mint8, uint8(i)) + case i >= math.MinInt16: + return mw.prefix16(mint16, uint16(i)) + case i >= math.MinInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } +} + +// WriteInt8 writes an int8 to the writer +func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) } + +// WriteInt16 writes an int16 to the writer +func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) } + +// WriteInt32 writes an int32 to the writer +func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) } + +// WriteInt writes an int to the writer +func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) } + +// WriteUint64 writes a uint64 to the writer +func (mw *Writer) WriteUint64(u uint64) error { + switch { + case u <= (1<<7)-1: + return mw.push(wfixint(uint8(u))) + case u <= math.MaxUint8: + return mw.prefix8(muint8, uint8(u)) + case u <= math.MaxUint16: + return mw.prefix16(muint16, uint16(u)) + case u <= math.MaxUint32: + return mw.prefix32(muint32, uint32(u)) + default: + return mw.prefix64(muint64, u) + } +} + +// WriteByte is analogous to WriteUint8 +func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) } + +// WriteUint8 writes a uint8 to 
the writer +func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint16 writes a uint16 to the writer +func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint32 writes a uint32 to the writer +func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint writes a uint to the writer +func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) } + +// WriteBytes writes binary as 'bin' to the writer +func (mw *Writer) WriteBytes(b []byte) error { + sz := uint32(len(b)) + var err error + switch { + case sz <= math.MaxUint8: + err = mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mbin16, uint16(sz)) + default: + err = mw.prefix32(mbin32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(b) + return err +} + +// WriteBytesHeader writes just the size header +// of a MessagePack 'bin' object. The user is responsible +// for then writing 'sz' more bytes into the stream. +func (mw *Writer) WriteBytesHeader(sz uint32) error { + switch { + case sz <= math.MaxUint8: + return mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mbin16, uint16(sz)) + default: + return mw.prefix32(mbin32, sz) + } +} + +// WriteBool writes a bool to the writer +func (mw *Writer) WriteBool(b bool) error { + if b { + return mw.push(mtrue) + } + return mw.push(mfalse) +} + +// WriteString writes a messagepack string to the writer. +// (This is NOT an implementation of io.StringWriter) +func (mw *Writer) WriteString(s string) error { + sz := uint32(len(s)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + return mw.writeString(s) +} + +// WriteStringHeader writes just the string size +// header of a MessagePack 'str' object. The user +// is responsible for writing 'sz' more valid UTF-8 +// bytes to the stream. +func (mw *Writer) WriteStringHeader(sz uint32) error { + switch { + case sz <= 31: + return mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + return mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mstr16, uint16(sz)) + default: + return mw.prefix32(mstr32, sz) + } +} + +// WriteStringFromBytes writes a 'str' object +// from a []byte. 
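+//
+// A sketch of the conversion this avoids, assuming 'mw' is a *Writer and
+// 'name' is a []byte already holding valid UTF-8:
+//
+//	err := mw.WriteStringFromBytes(name) // no string(name) allocation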
+func (mw *Writer) WriteStringFromBytes(str []byte) error { + sz := uint32(len(str)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(str) + return err +} + +// WriteComplex64 writes a complex64 to the writer +func (mw *Writer) WriteComplex64(f complex64) error { + o, err := mw.require(10) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = Complex64Extension + big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f))) + big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f))) + return nil +} + +// WriteComplex128 writes a complex128 to the writer +func (mw *Writer) WriteComplex128(f complex128) error { + o, err := mw.require(18) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = Complex128Extension + big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f))) + big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f))) + return nil +} + +// WriteMapStrStr writes a map[string]string to the writer +func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteString(val) + if err != nil { + return + } + } + return nil +} + +// WriteMapStrIntf writes a map[string]interface to the writer +func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteIntf(val) + if err != nil { + return + } + } + return +} + +// WriteTime writes a time.Time object to the wire. +// +// Time is encoded as Unix time, which means that +// location (time zone) data is removed from the object. +// The encoded object itself is 12 bytes: 8 bytes for +// a big-endian 64-bit integer denoting seconds +// elapsed since "zero" Unix time, followed by 4 bytes +// for a big-endian 32-bit signed integer denoting +// the nanosecond offset of the time. This encoding +// is intended to ease portability across languages. +// (Note that this is *not* the standard time.Time +// binary encoding, because its implementation relies +// heavily on the internal representation used by the +// time package.) +func (mw *Writer) WriteTime(t time.Time) error { + t = t.UTC() + o, err := mw.require(15) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 12 + mw.buf[o+2] = TimeExtension + putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond())) + return nil +} + +// WriteIntf writes the concrete type of 'v'. 
+// WriteIntf will error if 'v' is not one of the following: +// - A bool, float, string, []byte, int, uint, or complex +// - A map of supported types (with string keys) +// - An array or slice of supported types +// - A pointer to a supported type +// - A type that satisfies the msgp.Encodable interface +// - A type that satisfies the msgp.Extension interface +func (mw *Writer) WriteIntf(v interface{}) error { + if v == nil { + return mw.WriteNil() + } + switch v := v.(type) { + + // preferred interfaces + + case Encodable: + return v.EncodeMsg(mw) + case Extension: + return mw.WriteExtension(v) + + // concrete types + + case bool: + return mw.WriteBool(v) + case float32: + return mw.WriteFloat32(v) + case float64: + return mw.WriteFloat64(v) + case complex64: + return mw.WriteComplex64(v) + case complex128: + return mw.WriteComplex128(v) + case uint8: + return mw.WriteUint8(v) + case uint16: + return mw.WriteUint16(v) + case uint32: + return mw.WriteUint32(v) + case uint64: + return mw.WriteUint64(v) + case uint: + return mw.WriteUint(v) + case int8: + return mw.WriteInt8(v) + case int16: + return mw.WriteInt16(v) + case int32: + return mw.WriteInt32(v) + case int64: + return mw.WriteInt64(v) + case int: + return mw.WriteInt(v) + case string: + return mw.WriteString(v) + case []byte: + return mw.WriteBytes(v) + case map[string]string: + return mw.WriteMapStrStr(v) + case map[string]interface{}: + return mw.WriteMapStrIntf(v) + case time.Time: + return mw.WriteTime(v) + } + + val := reflect.ValueOf(v) + if !isSupported(val.Kind()) || !val.IsValid() { + return fmt.Errorf("msgp: type %s not supported", val) + } + + switch val.Kind() { + case reflect.Ptr: + if val.IsNil() { + return mw.WriteNil() + } + return mw.WriteIntf(val.Elem().Interface()) + case reflect.Slice: + return mw.writeSlice(val) + case reflect.Map: + return mw.writeMap(val) + } + return &ErrUnsupportedType{val.Type()} +} + +func (mw *Writer) writeMap(v reflect.Value) (err error) { + if v.Elem().Kind() != reflect.String { + return errors.New("msgp: map keys must be strings") + } + ks := v.MapKeys() + err = mw.WriteMapHeader(uint32(len(ks))) + if err != nil { + return + } + for _, key := range ks { + val := v.MapIndex(key) + err = mw.WriteString(key.String()) + if err != nil { + return + } + err = mw.WriteIntf(val.Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeSlice(v reflect.Value) (err error) { + // is []byte + if v.Type().ConvertibleTo(btsType) { + return mw.WriteBytes(v.Bytes()) + } + + sz := uint32(v.Len()) + err = mw.WriteArrayHeader(sz) + if err != nil { + return + } + for i := uint32(0); i < sz; i++ { + err = mw.WriteIntf(v.Index(int(i)).Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeStruct(v reflect.Value) error { + if enc, ok := v.Interface().(Encodable); ok { + return enc.EncodeMsg(mw) + } + return fmt.Errorf("msgp: unsupported type: %s", v.Type()) +} + +func (mw *Writer) writeVal(v reflect.Value) error { + if !isSupported(v.Kind()) { + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) + } + + // shortcut for nil values + if v.IsNil() { + return mw.WriteNil() + } + switch v.Kind() { + case reflect.Bool: + return mw.WriteBool(v.Bool()) + + case reflect.Float32, reflect.Float64: + return mw.WriteFloat64(v.Float()) + + case reflect.Complex64, reflect.Complex128: + return mw.WriteComplex128(v.Complex()) + + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8: + return mw.WriteInt64(v.Int()) + + case 
+	case reflect.Interface, reflect.Ptr:
+		if v.IsNil() {
+			return mw.WriteNil()
+		}
+		return mw.writeVal(v.Elem())
+
+	case reflect.Map:
+		return mw.writeMap(v)
+
+	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
+		return mw.WriteUint64(v.Uint())
+
+	case reflect.String:
+		return mw.WriteString(v.String())
+
+	case reflect.Slice, reflect.Array:
+		return mw.writeSlice(v)
+
+	case reflect.Struct:
+		return mw.writeStruct(v)
+
+	}
+	return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type())
+}
+
+// is the reflect.Kind encodable?
+func isSupported(k reflect.Kind) bool {
+	switch k {
+	case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer:
+		return false
+	default:
+		return true
+	}
+}
+
+// GuessSize guesses the size of the underlying
+// value of 'i'. If the underlying value is not
+// a simple builtin (or []byte), GuessSize defaults
+// to 512.
+func GuessSize(i interface{}) int {
+	if i == nil {
+		return NilSize
+	}
+
+	switch i := i.(type) {
+	case Sizer:
+		return i.Msgsize()
+	case Extension:
+		return ExtensionPrefixSize + i.Len()
+	case float64:
+		return Float64Size
+	case float32:
+		return Float32Size
+	case uint8, uint16, uint32, uint64, uint:
+		return UintSize
+	case int8, int16, int32, int64, int:
+		return IntSize
+	case []byte:
+		return BytesPrefixSize + len(i)
+	case string:
+		return StringPrefixSize + len(i)
+	case complex64:
+		return Complex64Size
+	case complex128:
+		return Complex128Size
+	case bool:
+		return BoolSize
+	case map[string]interface{}:
+		s := MapHeaderSize
+		for key, val := range i {
+			s += StringPrefixSize + len(key) + GuessSize(val)
+		}
+		return s
+	case map[string]string:
+		s := MapHeaderSize
+		for key, val := range i {
+			s += 2*StringPrefixSize + len(key) + len(val)
+		}
+		return s
+	default:
+		return 512
+	}
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
new file mode 100644
index 000000000000..eaa03c46ebb1
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
@@ -0,0 +1,411 @@
+package msgp
+
+import (
+	"math"
+	"reflect"
+	"time"
+)
+
+// ensure 'sz' extra bytes in 'b' between len(b) and cap(b)
+func ensure(b []byte, sz int) ([]byte, int) {
+	l := len(b)
+	c := cap(b)
+	if c-l < sz {
+		o := make([]byte, (2*c)+sz) // exponential growth
+		n := copy(o, b)
+		return o[:n+sz], n
+	}
+	return b[:l+sz], l
+}
+
+// AppendMapHeader appends a map header with the
+// given size to the slice
+func AppendMapHeader(b []byte, sz uint32) []byte {
+	switch {
+	case sz <= 15:
+		return append(b, wfixmap(uint8(sz)))
+
+	case sz <= math.MaxUint16:
+		o, n := ensure(b, 3)
+		prefixu16(o[n:], mmap16, uint16(sz))
+		return o
+
+	default:
+		o, n := ensure(b, 5)
+		prefixu32(o[n:], mmap32, sz)
+		return o
+	}
+}
+
+// AppendArrayHeader appends an array header with
+// the given size to the slice
+func AppendArrayHeader(b []byte, sz uint32) []byte {
+	switch {
+	case sz <= 15:
+		return append(b, wfixarray(uint8(sz)))
+
+	case sz <= math.MaxUint16:
+		o, n := ensure(b, 3)
+		prefixu16(o[n:], marray16, uint16(sz))
+		return o
+
+	default:
+		o, n := ensure(b, 5)
+		prefixu32(o[n:], marray32, sz)
+		return o
+	}
+}
+
+// AppendNil appends a 'nil' byte to the slice
+func AppendNil(b []byte) []byte { return append(b, mnil) }
+
+// AppendFloat64 appends a float64 to the slice
+func AppendFloat64(b []byte, f float64) []byte {
+	o, n := ensure(b, Float64Size)
+	prefixu64(o[n:], mfloat64, math.Float64bits(f))
+	return o
+}
+
+// AppendFloat32 appends a float32 to the slice
+func AppendFloat32(b []byte, f float32) []byte {
+	o, n := ensure(b, Float32Size)
+	prefixu32(o[n:], mfloat32, math.Float32bits(f))
+	return o
+}
+
+// AppendInt64 appends an int64 to the slice
+func AppendInt64(b []byte, i int64) []byte {
+	if i >= 0 {
+		switch {
+		case i <= math.MaxInt8:
+			return append(b, wfixint(uint8(i)))
+		case i <= math.MaxInt16:
+			o, n := ensure(b, 3)
+			putMint16(o[n:], int16(i))
+			return o
+		case i <= math.MaxInt32:
+			o, n := ensure(b, 5)
+			putMint32(o[n:], int32(i))
+			return o
+		default:
+			o, n := ensure(b, 9)
+			putMint64(o[n:], i)
+			return o
+		}
+	}
+	switch {
+	case i >= -32:
+		return append(b, wnfixint(int8(i)))
+	case i >= math.MinInt8:
+		o, n := ensure(b, 2)
+		putMint8(o[n:], int8(i))
+		return o
+	case i >= math.MinInt16:
+		o, n := ensure(b, 3)
+		putMint16(o[n:], int16(i))
+		return o
+	case i >= math.MinInt32:
+		o, n := ensure(b, 5)
+		putMint32(o[n:], int32(i))
+		return o
+	default:
+		o, n := ensure(b, 9)
+		putMint64(o[n:], i)
+		return o
+	}
+}
+
+// AppendInt appends an int to the slice
+func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt8 appends an int8 to the slice
+func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt16 appends an int16 to the slice
+func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt32 appends an int32 to the slice
+func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendUint64 appends a uint64 to the slice
+func AppendUint64(b []byte, u uint64) []byte {
+	switch {
+	case u <= (1<<7)-1:
+		return append(b, wfixint(uint8(u)))
+
+	case u <= math.MaxUint8:
+		o, n := ensure(b, 2)
+		putMuint8(o[n:], uint8(u))
+		return o
+
+	case u <= math.MaxUint16:
+		o, n := ensure(b, 3)
+		putMuint16(o[n:], uint16(u))
+		return o
+
+	case u <= math.MaxUint32:
+		o, n := ensure(b, 5)
+		putMuint32(o[n:], uint32(u))
+		return o
+
+	default:
+		o, n := ensure(b, 9)
+		putMuint64(o[n:], u)
+		return o
+
+	}
+}
+
+// AppendUint appends a uint to the slice
+func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendUint8 appends a uint8 to the slice
+func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendByte is analogous to AppendUint8
+func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
+
+// AppendUint16 appends a uint16 to the slice
+func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendUint32 appends a uint32 to the slice
+func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendBytes appends bytes to the slice as MessagePack 'bin' data
+func AppendBytes(b []byte, bts []byte) []byte {
+	sz := len(bts)
+	var o []byte
+	var n int
+	switch {
+	case sz <= math.MaxUint8:
+		o, n = ensure(b, 2+sz)
+		prefixu8(o[n:], mbin8, uint8(sz))
+		n += 2
+	case sz <= math.MaxUint16:
+		o, n = ensure(b, 3+sz)
+		prefixu16(o[n:], mbin16, uint16(sz))
+		n += 3
+	default:
+		o, n = ensure(b, 5+sz)
+		prefixu32(o[n:], mbin32, uint32(sz))
+		n += 5
+	}
+	return o[:n+copy(o[n:], bts)]
+}
+
+// AppendBool appends a bool to the slice
+func AppendBool(b []byte, t bool) []byte {
+	if t {
+		return append(b, mtrue)
+	}
+	return append(b, mfalse)
+}
+
+// AppendString appends a string as a MessagePack 'str' to the slice
+func AppendString(b []byte, s string) []byte {
+	sz := len(s)
+	var n int
+	var o []byte
+	switch {
+	case sz <= 31:
+		o, n = ensure(b, 1+sz)
+		o[n] = wfixstr(uint8(sz))
+		n++
+	case sz <= math.MaxUint8:
+		o, n = ensure(b, 2+sz)
+		prefixu8(o[n:], mstr8, uint8(sz))
+		n += 2
+	case sz <= math.MaxUint16:
+		o, n = ensure(b, 3+sz)
+		prefixu16(o[n:], mstr16, uint16(sz))
+		n += 3
+	default:
+		o, n = ensure(b, 5+sz)
+		prefixu32(o[n:], mstr32, uint32(sz))
+		n += 5
+	}
+	return o[:n+copy(o[n:], s)]
+}
+
+// AppendStringFromBytes appends a []byte
+// as a MessagePack 'str' to the slice 'b'.
+func AppendStringFromBytes(b []byte, str []byte) []byte {
+	sz := len(str)
+	var n int
+	var o []byte
+	switch {
+	case sz <= 31:
+		o, n = ensure(b, 1+sz)
+		o[n] = wfixstr(uint8(sz))
+		n++
+	case sz <= math.MaxUint8:
+		o, n = ensure(b, 2+sz)
+		prefixu8(o[n:], mstr8, uint8(sz))
+		n += 2
+	case sz <= math.MaxUint16:
+		o, n = ensure(b, 3+sz)
+		prefixu16(o[n:], mstr16, uint16(sz))
+		n += 3
+	default:
+		o, n = ensure(b, 5+sz)
+		prefixu32(o[n:], mstr32, uint32(sz))
+		n += 5
+	}
+	return o[:n+copy(o[n:], str)]
+}
+
+// AppendComplex64 appends a complex64 to the slice as a MessagePack extension
+func AppendComplex64(b []byte, c complex64) []byte {
+	o, n := ensure(b, Complex64Size)
+	o[n] = mfixext8
+	o[n+1] = Complex64Extension
+	big.PutUint32(o[n+2:], math.Float32bits(real(c)))
+	big.PutUint32(o[n+6:], math.Float32bits(imag(c)))
+	return o
+}
+
+// AppendComplex128 appends a complex128 to the slice as a MessagePack extension
+func AppendComplex128(b []byte, c complex128) []byte {
+	o, n := ensure(b, Complex128Size)
+	o[n] = mfixext16
+	o[n+1] = Complex128Extension
+	big.PutUint64(o[n+2:], math.Float64bits(real(c)))
+	big.PutUint64(o[n+10:], math.Float64bits(imag(c)))
+	return o
+}
+
+// AppendTime appends a time.Time to the slice as a MessagePack extension
+func AppendTime(b []byte, t time.Time) []byte {
+	o, n := ensure(b, TimeSize)
+	t = t.UTC()
+	o[n] = mext8
+	o[n+1] = 12
+	o[n+2] = TimeExtension
+	putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond()))
+	return o
+}
+
+// AppendMapStrStr appends a map[string]string to the slice
+// as a MessagePack map with 'str'-type keys and values
+func AppendMapStrStr(b []byte, m map[string]string) []byte {
+	sz := uint32(len(m))
+	b = AppendMapHeader(b, sz)
+	for key, val := range m {
+		b = AppendString(b, key)
+		b = AppendString(b, val)
+	}
+	return b
+}
+
+// AppendMapStrIntf appends a map[string]interface{} to the slice
+// as a MessagePack map with 'str'-type keys.
+func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
+	sz := uint32(len(m))
+	b = AppendMapHeader(b, sz)
+	var err error
+	for key, val := range m {
+		b = AppendString(b, key)
+		b, err = AppendIntf(b, val)
+		if err != nil {
+			return b, err
+		}
+	}
+	return b, nil
+}
+
+// AppendIntf appends the concrete type of 'i' to the
+// provided []byte. 'i' must be one of the following:
+// - 'nil'
+// - A bool, float, string, []byte, int, uint, or complex
+// - A map[string]interface{} or map[string]string
+// - A []T, where T is another supported type
+// - A *T, where T is another supported type
+// - A type that satisfies the msgp.Marshaler interface
+// - A type that satisfies the msgp.Extension interface
+func AppendIntf(b []byte, i interface{}) ([]byte, error) {
+	if i == nil {
+		return AppendNil(b), nil
+	}
+
+	// all the concrete types
+	// for which we have methods
+	switch i := i.(type) {
+	case Marshaler:
+		return i.MarshalMsg(b)
+	case Extension:
+		return AppendExtension(b, i)
+	case bool:
+		return AppendBool(b, i), nil
+	case float32:
+		return AppendFloat32(b, i), nil
+	case float64:
+		return AppendFloat64(b, i), nil
+	case complex64:
+		return AppendComplex64(b, i), nil
+	case complex128:
+		return AppendComplex128(b, i), nil
+	case string:
+		return AppendString(b, i), nil
+	case []byte:
+		return AppendBytes(b, i), nil
+	case int8:
+		return AppendInt8(b, i), nil
+	case int16:
+		return AppendInt16(b, i), nil
+	case int32:
+		return AppendInt32(b, i), nil
+	case int64:
+		return AppendInt64(b, i), nil
+	case int:
+		return AppendInt64(b, int64(i)), nil
+	case uint:
+		return AppendUint64(b, uint64(i)), nil
+	case uint8:
+		return AppendUint8(b, i), nil
+	case uint16:
+		return AppendUint16(b, i), nil
+	case uint32:
+		return AppendUint32(b, i), nil
+	case uint64:
+		return AppendUint64(b, i), nil
+	case time.Time:
+		return AppendTime(b, i), nil
+	case map[string]interface{}:
+		return AppendMapStrIntf(b, i)
+	case map[string]string:
+		return AppendMapStrStr(b, i), nil
+	case []interface{}:
+		b = AppendArrayHeader(b, uint32(len(i)))
+		var err error
+		for _, k := range i {
+			b, err = AppendIntf(b, k)
+			if err != nil {
+				return b, err
+			}
+		}
+		return b, nil
+	}
+
+	var err error
+	v := reflect.ValueOf(i)
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice:
+		l := v.Len()
+		b = AppendArrayHeader(b, uint32(l))
+		for i := 0; i < l; i++ {
+			b, err = AppendIntf(b, v.Index(i).Interface())
+			if err != nil {
+				return b, err
+			}
+		}
+		return b, nil
+	case reflect.Ptr:
+		if v.IsNil() {
+			return AppendNil(b), err
+		}
+		b, err = AppendIntf(b, v.Elem().Interface())
+		return b, err
+	default:
+		return b, &ErrUnsupportedType{T: v.Type()}
+	}
+}
diff --git a/vendor/github.com/weaveworks/billing-client/.gitignore b/vendor/github.com/weaveworks/billing-client/.gitignore
new file mode 100644
index 000000000000..6a0e16b9c6be
--- /dev/null
+++ b/vendor/github.com/weaveworks/billing-client/.gitignore
@@ -0,0 +1,23 @@
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+vendor
diff --git a/vendor/github.com/weaveworks/billing-client/Gopkg.lock b/vendor/github.com/weaveworks/billing-client/Gopkg.lock
new file mode 100644
index 000000000000..84f8c36720b4
--- /dev/null
+++ b/vendor/github.com/weaveworks/billing-client/Gopkg.lock
@@ -0,0 +1,105 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  branch = "master"
+  name = "github.com/beorn7/perks"
+  packages = ["quantile"]
+  revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
+
+[[projects]]
+  name = "github.com/fluent/fluent-logger-golang"
+  packages = ["fluent"]
+  revision = "28bdb662295c1b7aca09c1a4edbf9430010775a9"
+  version = "v1.2.1"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/golang/protobuf"
+  packages = ["proto"]
+  revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
+
+[[projects]]
+  name = "github.com/matttproud/golang_protobuf_extensions"
+  packages = ["pbutil"]
+  revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
+  version = "v1.0.0"
+
+[[projects]]
+  name = "github.com/opentracing/opentracing-go"
+  packages = [".","ext","log"]
+  revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
+  version = "v1.0.2"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/philhofer/fwd"
+  packages = ["."]
+  revision = "bb6d471dc95d4fe11e432687f8b70ff496cf3136"
+
+[[projects]]
+  name = "github.com/prometheus/client_golang"
+  packages = ["prometheus"]
+  revision = "c5b7fccd204277076155f10851dad72b76a49317"
+  version = "v0.8.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/prometheus/client_model"
+  packages = ["go"]
+  revision = "6f3806018612930941127f2a7c6c453ba2c527d2"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/prometheus/common"
+  packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
+  revision = "2f17f4a9d485bf34b4bfaccc273805040e4f86c8"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/prometheus/procfs"
+  packages = [".","xfs"]
+  revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2"
+
+[[projects]]
+  name = "github.com/sirupsen/logrus"
+  packages = ["."]
+  revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
+  version = "v1.0.3"
+
+[[projects]]
+  name = "github.com/tinylib/msgp"
+  packages = ["msgp"]
+  revision = "b2b6a672cf1e5b90748f79b8b81fc8c5cf0571a1"
+  version = "v1.0.2"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/weaveworks/common"
+  packages = ["instrument"]
+  revision = "79ec4e6e7a299b05f8e6a78912251cfc0432602c"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/crypto"
+  packages = ["ssh/terminal"]
+  revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/net"
+  packages = ["context"]
+  revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/sys"
+  packages = ["unix","windows"]
+  revision = "314a259e304ff91bd6985da2a7149bbf91237993"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "7b6c022a128f921446ae15ac4714d67909953bf4b3970706dbb0248704949aad"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/weaveworks/billing-client/Gopkg.toml b/vendor/github.com/weaveworks/billing-client/Gopkg.toml
new file mode 100644
index 000000000000..f359d7b99e9c
--- /dev/null
+++ b/vendor/github.com/weaveworks/billing-client/Gopkg.toml
@@ -0,0 +1,26 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#   name = "github.com/x/y"
+#   version = "2.4.0"
+
+
+[[constraint]]
+  name = "github.com/Sirupsen/logrus"
+  version = "1.0.3"
diff --git a/vendor/github.com/weaveworks/billing-client/README.md b/vendor/github.com/weaveworks/billing-client/README.md
new file mode 100644
index 000000000000..a234a953a8a1
--- /dev/null
+++ b/vendor/github.com/weaveworks/billing-client/README.md
@@ -0,0 +1,41 @@
+# billing-client
+
+A client library for sending usage data to the billing system.
+
+Open sourced so it can be imported into our open-source projects.
+
+## Usage
+
+`dep ensure github.com/weaveworks/billing-client`
+
+then
+
+```Go
+import billing "github.com/weaveworks/billing-client"
+
+func init() {
+  billing.MustRegisterMetrics()
+}
+
+func main() {
+  var cfg billing.Config
+  cfg.RegisterFlags(flag.CommandLine)
+  flag.Parse()
+
+  client, err := billing.NewClient(cfg)
+  defer client.Close()
+
+  err = client.AddAmounts(
+    uniqueKey, // Unique hash of the data, or a uuid here for deduping
+    internalInstanceID,
+    timestamp,
+    billing.Amounts{
+      billing.ContainerSeconds: 1234,
+    },
+    map[string]string{
+      "metadata": "goes here",
+    },
+  )
+}
+
+```
diff --git a/vendor/github.com/weaveworks/billing-client/client.go b/vendor/github.com/weaveworks/billing-client/client.go
new file mode 100644
index 000000000000..5f18738074df
--- /dev/null
+++ b/vendor/github.com/weaveworks/billing-client/client.go
@@ -0,0 +1,207 @@
+package billing
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/fluent/fluent-logger-golang/fluent"
+	"github.com/prometheus/client_golang/prometheus"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/weaveworks/common/instrument"
+)
+
+var (
+	// requestCollector is the duration of billing client requests
+	requestCollector = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: "billing_client",
+		Name:      "request_duration_seconds",
+		Help:      "Time in seconds spent emitting billing info.",
+		Buckets:   prometheus.DefBuckets,
+	}, []string{"method", "status_code"}))
+
+	// EventsCounter is the count of billing events
+	EventsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "billing_client",
+		Name:      "events_total",
+		Help:      "Number and type of billing events",
+	}, []string{"status", "amount_type"})
+	// AmountsCounter is the total of the billing amounts
+	AmountsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "billing_client",
+		Name:      "amounts_total",
+		Help:      "Number and type of billing amounts",
+	}, []string{"status", "amount_type"})
+)
+
+// MustRegisterMetrics is a convenience function for registering all the metrics from this package
+func MustRegisterMetrics() {
+	requestCollector.Register()
+	prometheus.MustRegister(EventsCounter)
+	prometheus.MustRegister(AmountsCounter)
+}
+
+// Client is a billing client for sending usage information to the billing system.
+type Client struct {
+	stop   chan struct{}
+	wg     sync.WaitGroup
+	events chan Event
+	logger *fluent.Fluent
+	Config
+}
+
+// NewClient creates a new billing client.
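+// The client buffers up to cfg.MaxBufferedEvents events in memory, and a
+// background goroutine forwards them to the fluentd ingester listening at
+// cfg.IngesterHostPort.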
+func NewClient(cfg Config) (*Client, error) {
+	host, port, err := net.SplitHostPort(cfg.IngesterHostPort)
+	if err != nil {
+		return nil, err
+	}
+	intPort, err := strconv.Atoi(port)
+	if err != nil {
+		return nil, err
+	}
+	logger, err := fluent.New(fluent.Config{
+		FluentPort:    intPort,
+		FluentHost:    host,
+		AsyncConnect:  true,
+		MaxRetry:      -1,
+		MarshalAsJSON: true,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	c := &Client{
+		stop:   make(chan struct{}),
+		events: make(chan Event, cfg.MaxBufferedEvents),
+		logger: logger,
+		Config: cfg,
+	}
+	c.wg.Add(1)
+	go c.loop()
+	return c, nil
+}
+
+// AddAmounts writes unit increments into the billing system. If the call does
+// not complete (due to a crash, etc.), then data may or may not have been
+// written successfully.
+//
+// Requests with the same `uniqueKey` can be retried indefinitely until they
+// succeed, and the results will be deduped.
+//
+// `uniqueKey` must be set, and not blank. If in doubt, generate a uuid and set
+// that as the uniqueKey. Consider that hashing the raw input data may not be
+// good enough since identical data may be sent from the client multiple times.
+//
+// `internalInstanceID` is *not* the external instance ID (e.g.
+// "fluffy-bunny-47"); it is the numeric internal instance ID (e.g. "1234").
+//
+// `timestamp` is used to determine which time bucket the usage occurred in;
+// it is included so that the result is independent of how long processing
+// takes. Note that, in the event of buffering, this timestamp may *not* agree
+// with when the charge will be billed to the customer.
+//
+// `amounts` is a map with all the various amounts we wish to charge the user
+// for.
+//
+// `metadata` is a general dumping ground for other metadata you may wish to
+// include for auditability. In general, be careful about the size of data put
+// here. Prefer including a lookup address over whole data. For example,
+// include a report id or s3 address instead of the information in the report.
+func (c *Client) AddAmounts(uniqueKey, internalInstanceID string, timestamp time.Time, amounts Amounts, metadata map[string]string) error {
+	return instrument.CollectedRequest(context.Background(), "Billing.AddAmounts", requestCollector, nil, func(_ context.Context) error {
+		if uniqueKey == "" {
+			return fmt.Errorf("billing: units uniqueKey cannot be blank")
+		}
+
+		e := Event{
+			UniqueKey:          uniqueKey,
+			InternalInstanceID: internalInstanceID,
+			OccurredAt:         timestamp,
+			Amounts:            amounts,
+			Metadata:           metadata,
+		}
+
+		select {
+		case <-c.stop:
+			trackEvent("stopping", e)
+			return fmt.Errorf("billing: stopping, discarding event: %v", e)
+		default:
+		}
+
+		select {
+		case c.events <- e: // Put event in the channel unless it is full
+			return nil
+		default:
+			// full
+		}
+		trackEvent("buffer_full", e)
+		return fmt.Errorf("billing: reached billing event buffer limit (%d), discarding event: %v", c.MaxBufferedEvents, e)
+	})
+}
+
+func (c *Client) loop() {
+	defer c.wg.Done()
+	for done := false; !done; {
+		select {
+		case event := <-c.events:
+			c.post(event)
+		case <-c.stop:
+			done = true
+		}
+	}
+
+	// flush remaining events
+	for done := false; !done; {
+		select {
+		case event := <-c.events:
+			c.post(event)
+		default:
+			done = true
+		}
+	}
+}
+
+func (c *Client) post(e Event) error {
+	for {
+		var err error
+		for _, r := range e.toRecords() {
+			if err = c.logger.Post("billing", r); err != nil {
+				break
+			}
+		}
+		if err == nil {
+			trackEvent("success", e)
+			return nil
+		}
+		select {
+		case <-c.stop:
+			// We're quitting, no retries.
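+			// Give up on this event; loop() ignores the returned
+			// error, so the event is dropped.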
+			trackEvent("stopping", e)
+			log.Errorf("billing: failed to log event: %v: %v, stopping", e, err)
+			return err
+		default:
+			trackEvent("retrying", e)
+			log.Errorf("billing: failed to log event: %v: %v, retrying in %v", e, err, c.RetryDelay)
+			time.Sleep(c.RetryDelay)
+		}
+	}
+}
+
+func trackEvent(status string, e Event) {
+	for t, v := range e.Amounts {
+		EventsCounter.WithLabelValues(status, string(t)).Inc()
+		AmountsCounter.WithLabelValues(status, string(t)).Add(float64(v))
+	}
+}
+
+// Close shuts down the client and attempts to flush remaining events.
+func (c *Client) Close() error {
+	close(c.stop)
+	c.wg.Wait()
+	return c.logger.Close()
+}
diff --git a/vendor/github.com/weaveworks/billing-client/config.go b/vendor/github.com/weaveworks/billing-client/config.go
new file mode 100644
index 000000000000..16e7b07d5a78
--- /dev/null
+++ b/vendor/github.com/weaveworks/billing-client/config.go
@@ -0,0 +1,20 @@
+package billing
+
+import (
+	"flag"
+	"time"
+)
+
+// Config is the config for a billing client
+type Config struct {
+	MaxBufferedEvents int
+	RetryDelay        time.Duration
+	IngesterHostPort  string
+}
+
+// RegisterFlags registers the billing client flags with the main flag set
+func (c *Config) RegisterFlags(f *flag.FlagSet) {
+	f.IntVar(&c.MaxBufferedEvents, "billing.max-buffered-events", 1024, "Maximum number of billing events to buffer in memory")
+	f.DurationVar(&c.RetryDelay, "billing.retry-delay", 500*time.Millisecond, "How often to retry sending events to the billing ingester.")
+	f.StringVar(&c.IngesterHostPort, "billing.ingester", "localhost:24225", "points to the billing ingester sidecar (should be on localhost)")
+}
diff --git a/vendor/github.com/weaveworks/billing-client/event.go b/vendor/github.com/weaveworks/billing-client/event.go
new file mode 100644
index 000000000000..d468eeeef6cf
--- /dev/null
+++ b/vendor/github.com/weaveworks/billing-client/event.go
@@ -0,0 +1,45 @@
+package billing
+
+import (
+	"time"
+)
+
+// Event is a record of some amount of billable usage for a scope.
+type Event struct {
+	UniqueKey          string            `json:"unique_key" msg:"unique_key"`
+	InternalInstanceID string            `json:"internal_instance_id" msg:"internal_instance_id"`
+	OccurredAt         time.Time         `json:"occurred_at" msg:"occurred_at"`
+	Amounts            Amounts           `json:"amounts" msg:"amounts"`
+	Metadata           map[string]string `json:"metadata" msg:"metadata"`
+}
+
+// msgpack (and therefore fluentd) requires the things we send to it to be
+// map[string]interface{}, so we return them here, not a struct. :(
+func (e Event) toRecords() []map[string]interface{} {
+	var records []map[string]interface{}
+	for t, v := range e.Amounts {
+		records = append(records, map[string]interface{}{
+			"unique_key":           e.UniqueKey + ":" + string(t),
+			"internal_instance_id": e.InternalInstanceID,
+			"amount_type":          string(t),
+			"amount_value":         v,
+			"occurred_at":          e.OccurredAt,
+			"metadata":             e.Metadata,
+		})
+	}
+	return records
+}
+
+// AmountType is a type-cast of the enum for the different amount types
+type AmountType string
+
+const (
+	// ContainerSeconds is one of the billable metrics
+	ContainerSeconds AmountType = "container-seconds"
+	NodeSeconds      AmountType = "node-seconds"
+	WeaveNetSeconds  AmountType = "weavenet-seconds"
+	Samples          AmountType = "samples"
+)
+
+// Amounts is a map of amount billable metrics to their values
+type Amounts map[AmountType]int64
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 40d19399d76a..faa5dab8d1b2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -131,6 +131,7 @@ github.com/cortexproject/cortex/pkg/chunk/local
 github.com/cortexproject/cortex/pkg/chunk/storage
 github.com/cortexproject/cortex/pkg/chunk/testutils
 github.com/cortexproject/cortex/pkg/chunk/util
+github.com/cortexproject/cortex/pkg/distributor
 github.com/cortexproject/cortex/pkg/ingester/client
 github.com/cortexproject/cortex/pkg/ingester/index
 github.com/cortexproject/cortex/pkg/prom1/storage/metric
@@ -146,8 +147,10 @@ github.com/cortexproject/cortex/pkg/util
 github.com/cortexproject/cortex/pkg/util/extract
 github.com/cortexproject/cortex/pkg/util/flagext
 github.com/cortexproject/cortex/pkg/util/grpcclient
+github.com/cortexproject/cortex/pkg/util/limiter
 github.com/cortexproject/cortex/pkg/util/middleware
 github.com/cortexproject/cortex/pkg/util/spanlogger
+github.com/cortexproject/cortex/pkg/util/test
 github.com/cortexproject/cortex/pkg/util/validation
 # github.com/davecgh/go-spew v1.1.1
 github.com/davecgh/go-spew/spew
@@ -210,6 +213,8 @@ github.com/facette/natsort
 github.com/fatih/color
 # github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c
 github.com/fluent/fluent-bit-go/output
+# github.com/fluent/fluent-logger-golang v1.2.1
+github.com/fluent/fluent-logger-golang/fluent
 # github.com/fsnotify/fsnotify v1.4.7
 github.com/fsnotify/fsnotify
 # github.com/fsouza/fake-gcs-server v1.7.0
@@ -395,6 +400,8 @@ github.com/opentracing-contrib/go-stdlib/nethttp
 github.com/opentracing/opentracing-go
 github.com/opentracing/opentracing-go/ext
 github.com/opentracing/opentracing-go/log
+# github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8
+github.com/philhofer/fwd
 # github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible
 github.com/pierrec/lz4
 github.com/pierrec/lz4/internal/xxh32
@@ -488,6 +495,8 @@ github.com/stretchr/objx
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/mock
 github.com/stretchr/testify/require
+# github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d
+github.com/tinylib/msgp/msgp
 # github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5
 github.com/tmc/grpc-websocket-proxy/wsproxy
 # github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448
@@ -515,6 +524,8 @@ github.com/uber/jaeger-lib/metrics
 github.com/uber/jaeger-lib/metrics/prometheus
 # github.com/ugorji/go/codec v1.1.7
 github.com/ugorji/go/codec
+# github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1
+github.com/weaveworks/billing-client
 # github.com/weaveworks/common v0.0.0-20191103151037-0e7cefadc44f
 github.com/weaveworks/common/aws
 github.com/weaveworks/common/errors

From c9218713255374628cfe4f11010dd8b10a74bd98 Mon Sep 17 00:00:00 2001
From: Marco Pracucci
Date: Wed, 8 Jan 2020 12:56:32 +0100
Subject: [PATCH 2/5] Fixed code comments

Signed-off-by: Marco Pracucci
---
 pkg/distributor/distributor.go | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 31f4d086fd27..6386cbfd5880 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -80,7 +80,7 @@ type Distributor struct {
 	pool *cortex_client.Pool
 
 	// The global rate limiter requires a distributors ring to count
-	// the number of healthy instances
+	// the number of healthy instances.
 	distributorsRing *ring.Lifecycler
 
 	// Per-user rate limiter.
@@ -96,9 +96,7 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr
 		}
 	}
 
-	// Create the configured ingestion rate limit strategy (local or global). In case
-	// it's an internal dependency and can't join the distributors ring, we skip rate
-	// limiting.
+	// Create the configured ingestion rate limit strategy (local or global).
 	var ingestionRateStrategy limiter.RateLimiterStrategy
 	var distributorsRing *ring.Lifecycler
 

From 2450c5cb9ea90ef1213ebab73caa2f048a4a78f9 Mon Sep 17 00:00:00 2001
From: Marco Pracucci
Date: Wed, 8 Jan 2020 14:42:33 +0100
Subject: [PATCH 3/5] Updated config doc

Signed-off-by: Marco Pracucci
---
 docs/configuration/README.md | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/docs/configuration/README.md b/docs/configuration/README.md
index 035a5d203a49..055df0b91114 100644
--- a/docs/configuration/README.md
+++ b/docs/configuration/README.md
@@ -716,12 +716,27 @@ The `limits_config` block configures global and per-tenant limits for ingesting
 logs in Loki.
 
 ```yaml
+# Whether the ingestion rate limit should be applied individually to each
+# distributor instance (local), or evenly shared across the cluster (global).
+# The ingestion rate strategy cannot be overridden on a per-tenant basis.
+#
+# - local: enforces the limit on a per distributor basis. The actual effective
+# rate limit will be N times higher, where N is the number of distributor
+# replicas.
+# - global: enforces the limit globally, configuring a per-distributor local
+# rate limiter as "ingestion_rate / N", where N is the number of distributor
+# replicas (it's automatically adjusted if the number of replicas changes).
+# The global strategy requires the distributors to form their own ring, which
+# is used to keep track of the current number of healthy distributor replicas.
+[ingestion_rate_strategy: <string> | default = "local"]
+
 # Per-user ingestion rate limit in sample size per second. Units in MB.
 [ingestion_rate_mb: <int> | default = 4]
 
-# Per-user allowed ingestion burst size (in sample size). Units in MB. Warning,
-# very high limits will be reset every limiter_reload_period defined in
-# distributor_config.
+# Per-user allowed ingestion burst size (in sample size). Units in MB.
+# The burst size refers to the per-distributor local rate limiter even in the
+# case of the "global" strategy, and should be set at least to the maximum log
+# size expected in a single push request.
 [ingestion_burst_size_mb: <int> | default = 6]
 
 # Maximum length of a label name.
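To make the arithmetic in the new `ingestion_rate_strategy` documentation concrete: with `ingestion_rate_mb: 10` and 5 distributor replicas, `local` admits up to 10MB/s per distributor (so up to 50MB/s cluster-wide), while `global` configures each distributor's local limiter at 10 / 5 = 2MB/s, keeping the cluster-wide rate at 10MB/s. A minimal sketch of that division (illustrative helper only, not the patch's actual `ingestion_rate_strategy.go` code):

```Go
package main

import "fmt"

// globalPerDistributorLimit sketches the "global" strategy: the configured
// rate is divided by the number of healthy distributor replicas reported by
// the distributors ring, falling back to the full rate when the ring reports
// no healthy instances yet.
func globalPerDistributorLimit(ingestionRateMB float64, healthyReplicas int) float64 {
	if healthyReplicas <= 0 {
		return ingestionRateMB
	}
	return ingestionRateMB / float64(healthyReplicas)
}

func main() {
	fmt.Println(globalPerDistributorLimit(10, 5)) // 2 MB/s per distributor
}
```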
From c9a2f4bb8801dea64960b5f16df1f262265bcf7b Mon Sep 17 00:00:00 2001
From: Marco Pracucci
Date: Wed, 8 Jan 2020 14:47:11 +0100
Subject: [PATCH 4/5] Updated changelog

Signed-off-by: Marco Pracucci
---
 CHANGELOG.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8b6eb147280b..752e52fd6780 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,12 @@
 ## master / unreleased
 
+### Notable Changes
+* [1486](https://github.com/grafana/loki/pull/1486) **pracucci**: Deprecated `-distributor.limiter-reload-period` flag / distributor's `limiter_reload_period` config option.
+
+### Features
+
 * [FEATURE] promtail positions file corruptions can be ignored with the `positions.ignore-invalid-yaml` flag. In the case the positions yaml is corrupted an empty positions config will be used and should later overwrite the malformed yaml.
+* [1486](https://github.com/grafana/loki/pull/1486) **pracucci**: Added `global` ingestion rate limiter strategy support.
 
 # 1.2.0 (2019-12-09)

From fb8c532c4c4eb1df5b3072b9c4962d310a9f6559 Mon Sep 17 00:00:00 2001
From: Marco Pracucci
Date: Wed, 8 Jan 2020 14:54:01 +0100
Subject: [PATCH 5/5] Updated doc

Signed-off-by: Marco Pracucci
---
 docs/configuration/README.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/configuration/README.md b/docs/configuration/README.md
index 055df0b91114..1ff721364241 100644
--- a/docs/configuration/README.md
+++ b/docs/configuration/README.md
@@ -144,8 +144,9 @@ The `server_config` block configures Promtail's behavior as an HTTP server:
 The `distributor_config` block configures the Loki Distributor.
 
 ```yaml
-# Period at which to reload user ingestion limits.
-[limiter_reload_period: <duration> | default = 5m]
+# Configures the distributors ring, used when the "global" ingestion rate
+# strategy is enabled.
+[ring: <ring_config>]
 ```
 
 ## querier_config
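The vendored msgp package above exposes two parallel encoding paths: a buffered `Writer` (write.go) and append-style functions over a `[]byte` (write_bytes.go). A minimal, self-contained sketch of both paths (illustrative usage only, assuming the vendored import path; not code from this patch series):

```Go
package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// A single-entry map keeps the two encodings byte-identical
	// (Go map iteration order is randomized for multi-entry maps).
	m := map[string]interface{}{"amount_value": int64(42)}

	// Byte-slice path: GuessSize pre-sizes the buffer so ensure()
	// rarely has to grow it.
	buf, err := msgp.AppendMapStrIntf(make([]byte, 0, msgp.GuessSize(m)), m)
	if err != nil {
		panic(err)
	}

	// Streaming path: the Writer buffers writes to the underlying io.Writer.
	var out bytes.Buffer
	w := msgp.NewWriter(&out)
	if err := w.WriteMapStrIntf(m); err != nil {
		panic(err)
	}
	if err := w.Flush(); err != nil { // Writer is buffered; Flush pushes bytes to out
		panic(err)
	}

	fmt.Println(bytes.Equal(buf, out.Bytes())) // true
}
```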