diff --git a/cmd/docker-driver/config_test.go b/cmd/docker-driver/config_test.go index 0dd1dd8fe1a0..2338bb01decd 100644 --- a/cmd/docker-driver/config_test.go +++ b/cmd/docker-driver/config_test.go @@ -7,7 +7,7 @@ import ( "reflect" "testing" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/docker/docker/daemon/logger" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -117,7 +117,7 @@ func Test_parsePipeline(t *testing.T) { // all configs are supposed to be valid name := "foo" - _, err = stages.NewPipeline(util.Logger, got.PipelineStages, &name, prometheus.DefaultRegisterer) + _, err = stages.NewPipeline(util_log.Logger, got.PipelineStages, &name, prometheus.DefaultRegisterer) if err != nil { t.Error(err) } diff --git a/cmd/docker-driver/main.go b/cmd/docker-driver/main.go index 3f76ed7fc687..f568715fa00f 100644 --- a/cmd/docker-driver/main.go +++ b/cmd/docker-driver/main.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/docker/go-plugins-helpers/sdk" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -29,7 +29,7 @@ func main() { os.Exit(1) } logger := newLogger(logLevel) - level.Info(util.Logger).Log("msg", "Starting docker-plugin", "version", version.Info()) + level.Info(util_log.Logger).Log("msg", "Starting docker-plugin", "version", version.Info()) h := sdk.NewHandler(`{"Implements": ["LoggingDriver"]}`) diff --git a/cmd/loki/main.go b/cmd/loki/main.go index d69ac92113ad..4adcf1fcd37c 100644 --- a/cmd/loki/main.go +++ b/cmd/loki/main.go @@ -18,7 +18,6 @@ import ( "github.com/grafana/loki/pkg/loki" logutil "github.com/grafana/loki/pkg/util" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/grafana/loki/pkg/util/validation" @@ -76,35 +75,35 @@ func main() { // Init the logger which will honor the log level set in config.Server if reflect.DeepEqual(&config.Server.LogLevel, &logging.Level{}) { - level.Error(util.Logger).Log("msg", "invalid log level") + level.Error(util_log.Logger).Log("msg", "invalid log level") os.Exit(1) } - util.InitLogger(&config.Server) + util_log.InitLogger(&config.Server) // Validate the config once both the config file has been loaded // and CLI flags parsed. 
- err := config.Validate(util.Logger) + err := config.Validate(util_log.Logger) if err != nil { - level.Error(util.Logger).Log("msg", "validating config", "err", err.Error()) + level.Error(util_log.Logger).Log("msg", "validating config", "err", err.Error()) os.Exit(1) } if config.verifyConfig { - level.Info(util.Logger).Log("msg", "config is valid") + level.Info(util_log.Logger).Log("msg", "config is valid") os.Exit(0) } if config.printConfig { err := logutil.PrintConfig(os.Stderr, &config) if err != nil { - level.Error(util.Logger).Log("msg", "failed to print config to stderr", "err", err.Error()) + level.Error(util_log.Logger).Log("msg", "failed to print config to stderr", "err", err.Error()) } } if config.logConfig { err := logutil.LogConfig(&config) if err != nil { - level.Error(util.Logger).Log("msg", "failed to log config object", "err", err.Error()) + level.Error(util_log.Logger).Log("msg", "failed to log config object", "err", err.Error()) } } @@ -112,12 +111,12 @@ func main() { // Setting the environment variable JAEGER_AGENT_HOST enables tracing trace, err := tracing.NewFromEnv(fmt.Sprintf("loki-%s", config.Target)) if err != nil { - level.Error(util.Logger).Log("msg", "error in initializing tracing. tracing will not be enabled", "err", err) + level.Error(util_log.Logger).Log("msg", "error in initializing tracing. tracing will not be enabled", "err", err) } defer func() { if trace != nil { if err := trace.Close(); err != nil { - level.Error(util.Logger).Log("msg", "error closing tracing", "err", err) + level.Error(util_log.Logger).Log("msg", "error closing tracing", "err", err) } } @@ -128,7 +127,7 @@ func main() { t, err := loki.New(config.Config) util_log.CheckFatal("initialising loki", err) - level.Info(util.Logger).Log("msg", "Starting Loki", "version", version.Info()) + level.Info(util_log.Logger).Log("msg", "Starting Loki", "version", version.Info()) err = t.Run() util_log.CheckFatal("running loki", err) diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go index 9f8f3a06a4fa..cb540f095e7e 100644 --- a/cmd/migrate/main.go +++ b/cmd/migrate/main.go @@ -13,7 +13,7 @@ import ( "time" cortex_storage "github.com/cortexproject/cortex/pkg/chunk/storage" - cortex_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -93,7 +93,7 @@ func main() { } // Create a new registerer to avoid registering duplicate metrics prometheus.DefaultRegisterer = prometheus.NewRegistry() - sourceStore, err := cortex_storage.NewStore(sourceConfig.StorageConfig.Config, sourceConfig.ChunkStoreConfig, sourceConfig.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, cortex_util.Logger) + sourceStore, err := cortex_storage.NewStore(sourceConfig.StorageConfig.Config, sourceConfig.ChunkStoreConfig, sourceConfig.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, util_log.Logger) if err != nil { log.Println("Failed to create source store:", err) os.Exit(1) @@ -106,7 +106,7 @@ func main() { // Create a new registerer to avoid registering duplicate metrics prometheus.DefaultRegisterer = prometheus.NewRegistry() - destStore, err := cortex_storage.NewStore(destConfig.StorageConfig.Config, destConfig.ChunkStoreConfig, destConfig.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, cortex_util.Logger) + destStore, err := cortex_storage.NewStore(destConfig.StorageConfig.Config, destConfig.ChunkStoreConfig, 
destConfig.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, util_log.Logger) if err != nil { log.Println("Failed to create destination store:", err) os.Exit(1) diff --git a/cmd/promtail/main.go b/cmd/promtail/main.go index ea0ef529e41a..3b21e43b2641 100644 --- a/cmd/promtail/main.go +++ b/cmd/promtail/main.go @@ -8,8 +8,8 @@ import ( "k8s.io/klog" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/version" @@ -76,7 +76,7 @@ func main() { fmt.Println("Invalid log level") os.Exit(1) } - util.InitLogger(&config.ServerConfig.Config) + util_log.InitLogger(&config.ServerConfig.Config) // Use Stderr instead of files for the klog. klog.SetOutput(os.Stderr) @@ -90,28 +90,28 @@ func main() { if config.printConfig { err := logutil.PrintConfig(os.Stderr, &config) if err != nil { - level.Error(util.Logger).Log("msg", "failed to print config to stderr", "err", err.Error()) + level.Error(util_log.Logger).Log("msg", "failed to print config to stderr", "err", err.Error()) } } if config.logConfig { err := logutil.LogConfig(&config) if err != nil { - level.Error(util.Logger).Log("msg", "failed to log config object", "err", err.Error()) + level.Error(util_log.Logger).Log("msg", "failed to log config object", "err", err.Error()) } } p, err := promtail.New(config.Config, config.dryRun) if err != nil { - level.Error(util.Logger).Log("msg", "error creating promtail", "error", err) + level.Error(util_log.Logger).Log("msg", "error creating promtail", "error", err) os.Exit(1) } - level.Info(util.Logger).Log("msg", "Starting Promtail", "version", version.Info()) + level.Info(util_log.Logger).Log("msg", "Starting Promtail", "version", version.Info()) defer p.Shutdown() if err := p.Run(); err != nil { - level.Error(util.Logger).Log("msg", "error starting promtail", "error", err) + level.Error(util_log.Logger).Log("msg", "error starting promtail", "error", err) os.Exit(1) } } diff --git a/cmd/querytee/main.go b/cmd/querytee/main.go index 4f002764549f..9fb4aef0915b 100644 --- a/cmd/querytee/main.go +++ b/cmd/querytee/main.go @@ -9,7 +9,7 @@ import ( "github.com/weaveworks/common/logging" "github.com/weaveworks/common/server" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/tools/querytee" "github.com/grafana/loki/pkg/loghttp" @@ -29,7 +29,7 @@ func main() { cfg.ProxyConfig.RegisterFlags(flag.CommandLine) flag.Parse() - util.InitLogger(&server.Config{ + util_log.InitLogger(&server.Config{ LogLevel: cfg.LogLevel, }) @@ -39,19 +39,19 @@ func main() { i := querytee.NewInstrumentationServer(cfg.ServerMetricsPort, registry) if err := i.Start(); err != nil { - level.Error(util.Logger).Log("msg", "Unable to start instrumentation server", "err", err.Error()) + level.Error(util_log.Logger).Log("msg", "Unable to start instrumentation server", "err", err.Error()) os.Exit(1) } // Run the proxy. 
- proxy, err := querytee.NewProxy(cfg.ProxyConfig, util.Logger, lokiReadRoutes(cfg), registry) + proxy, err := querytee.NewProxy(cfg.ProxyConfig, util_log.Logger, lokiReadRoutes(cfg), registry) if err != nil { - level.Error(util.Logger).Log("msg", "Unable to initialize the proxy", "err", err.Error()) + level.Error(util_log.Logger).Log("msg", "Unable to initialize the proxy", "err", err.Error()) os.Exit(1) } if err := proxy.Start(); err != nil { - level.Error(util.Logger).Log("msg", "Unable to start the proxy", "err", err.Error()) + level.Error(util_log.Logger).Log("msg", "Unable to start the proxy", "err", err.Error()) os.Exit(1) } diff --git a/cmd/querytee/response_comparator.go b/cmd/querytee/response_comparator.go index 9ac529142e1f..85db997df79b 100644 --- a/cmd/querytee/response_comparator.go +++ b/cmd/querytee/response_comparator.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" jsoniter "github.com/json-iterator/go" @@ -46,7 +46,7 @@ func compareStreams(expectedRaw, actualRaw json.RawMessage, tolerance float64) e err := fmt.Errorf("expected %d values for stream %s but got %d", expectedValuesLen, expectedStream.Labels, actualValuesLen) if expectedValuesLen > 0 && actualValuesLen > 0 { - level.Error(util.Logger).Log("msg", err.Error(), "oldest-expected-ts", expectedStream.Entries[0].Timestamp.UnixNano(), + level.Error(util_log.Logger).Log("msg", err.Error(), "oldest-expected-ts", expectedStream.Entries[0].Timestamp.UnixNano(), "newest-expected-ts", expectedStream.Entries[expectedValuesLen-1].Timestamp.UnixNano(), "oldest-actual-ts", actualStream.Entries[0].Timestamp.UnixNano(), "newest-actual-ts", actualStream.Entries[actualValuesLen-1].Timestamp.UnixNano()) } diff --git a/go.mod b/go.mod index 650cd751efa8..9c3fc5e02423 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/cortexproject/cortex v1.6.1-0.20210129172402-0976147451ee + github.com/cortexproject/cortex v1.6.1-0.20210204145131-7dac81171c66 github.com/davecgh/go-spew v1.1.1 github.com/docker/docker v20.10.1+incompatible github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect @@ -24,7 +24,7 @@ require ( github.com/go-logfmt/logfmt v0.5.0 github.com/gofrs/flock v0.7.1 // indirect github.com/gogo/protobuf v1.3.1 // remember to update loki-build-image/Dockerfile too - github.com/golang/snappy v0.0.2 + github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 diff --git a/go.sum b/go.sum index a823e70b1689..f437c3f93be1 100644 --- a/go.sum +++ b/go.sum @@ -336,8 +336,9 @@ github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:P github.com/cortexproject/cortex v1.3.1-0.20200923145333-8587ea61fe17/go.mod h1:dJ9gpW7dzQ7z09cKtNN9PfebumgyO4dtNdFQ6eQEed0= github.com/cortexproject/cortex v1.4.1-0.20201030080541-83ad6df2abea/go.mod h1:kXo5F3jlF7Ky3+I31jt/bXTzOlQjl2X/vGDpy0RY1gU= github.com/cortexproject/cortex v1.5.1-0.20201111110551-ba512881b076/go.mod h1:zFBGVsvRBfVp6ARXZ7pmiLaGlbjda5ZnA4Y6qSJyrQg= -github.com/cortexproject/cortex v1.6.1-0.20210129172402-0976147451ee h1:Lj7kPgeuMHzoejxD4QQjYNMDqPNB5Uiqj0GvYaINnG0= -github.com/cortexproject/cortex 
v1.6.1-0.20210129172402-0976147451ee/go.mod h1:uwptskTaCiJPGHaEsIthCBtnOA1nN+KpLDezYvbvU8o= +github.com/cortexproject/cortex v1.6.1-0.20210108144208-6c2dab103f20/go.mod h1:fOsaeeFSyWrjd9nFJO8KVUpsikcxnYsjEzQyjURBoQk= +github.com/cortexproject/cortex v1.6.1-0.20210204145131-7dac81171c66 h1:ZCpJ2TGDLw5dmDyO0owQLod4f+Q3oRwoqT8WXa1445g= +github.com/cortexproject/cortex v1.6.1-0.20210204145131-7dac81171c66/go.mod h1:hQ45oW8W7SKNBv4bkl1960kWyslLDbL2IWuzCQBCVGY= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= @@ -666,6 +667,8 @@ github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 h1:ur2rms48b3Ep1dxh7aUV2FZEQ8jEVO2F6ILKx8ofkAg= +github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -1373,6 +1376,7 @@ github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1: github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9 h1:T6pkPNGKXv21lLfgD/mnIABj9aOhmz8HphDmKllfKWs= github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ= github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg= +github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg= github.com/prometheus/prometheus v1.8.2-0.20210124145330-b5dfa2414b9e h1:AecjdAG+yqtpJXxsems6dOD8GT7st5qU9uvlV93G3hw= github.com/prometheus/prometheus v1.8.2-0.20210124145330-b5dfa2414b9e/go.mod h1:pZyryEk2SoMVjRI6XFqZLW7B9vPevv8lqwESVYjP1WA= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= @@ -1502,6 +1506,8 @@ github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52 h1:z3hglXVwJ4H github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52/go.mod h1:OqqX4x21cg5N5MMHd/yGQAc/V3wg8a7Do4Jk8HfaFZQ= github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51 h1:cinCqkVci8c5Dg6uB3m3351EjLAXDbwJVFT+bgwu/Ew= github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51/go.mod h1:kPvI4H0AynFiHDN95ZB28/k70ZPGCx+pBrRh6RZPimw= +github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe h1:YMGaJuBKOK3XtCxxezHClrV2OTImnSdzpMQnXG9nqgw= +github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe/go.mod h1:ZLDGYRNkgM+FCwYNOD+6tOV+DE2fpjzfV6iqXyOgFIw= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache 
v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= @@ -1549,6 +1555,7 @@ github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9 h1:dNVIG9aKQHR9T github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099 h1:MS5M2antM8wzMUqVxIfAi+yb6yjXvDINRFvLnmNXeIw= github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= +github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120 h1:zQtcwREXYNvW116ipgc0bRDg1avD2b6QP0RGPLlPWkc= github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go index 98d1007d1e6a..151e16593b34 100644 --- a/pkg/chunkenc/memchunk.go +++ b/pkg/chunkenc/memchunk.go @@ -13,7 +13,7 @@ import ( "time" "github.com/cespare/xxhash/v2" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" @@ -342,7 +342,7 @@ func NewByteChunk(b []byte, blockSize, targetSize int) (*MemChunk, error) { // Verify checksums. expCRC := binary.BigEndian.Uint32(b[blk.offset+l:]) if expCRC != crc32.Checksum(blk.b, castagnoliTable) { - level.Error(util.Logger).Log("msg", "Checksum does not match for a block in chunk, this block will be skipped", "err", ErrInvalidChecksum) + level.Error(util_log.Logger).Log("msg", "Checksum does not match for a block in chunk, this block will be skipped", "err", ErrInvalidChecksum) continue } diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 3ee704b7e1cc..eb88bc418713 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -9,8 +9,8 @@ import ( cortex_distributor "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/ring" ring_client "github.com/cortexproject/cortex/pkg/ring/client" - cortex_util "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" lru "github.com/hashicorp/golang-lru" "github.com/pkg/errors" @@ -124,7 +124,7 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr ingestersRing: ingestersRing, distributorsRing: distributorsRing, validator: validator, - pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ingestersRing, factory, cortex_util.Logger), + pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ingestersRing, factory, util_log.Logger), ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), labelCache: labelCache, } @@ -234,10 +234,10 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log } const maxExpectedReplicationSet = 5 // typical replication factor 3 plus one for inactive plus one for luck - var descs [maxExpectedReplicationSet]ring.IngesterDesc + var descs [maxExpectedReplicationSet]ring.InstanceDesc samplesByIngester := map[string][]*streamTracker{} - ingesterDescs := 
map[string]ring.IngesterDesc{} + ingesterDescs := map[string]ring.InstanceDesc{} for i, key := range keys { replicationSet, err := d.ingestersRing.Get(key, ring.Write, descs[:0], nil, nil) if err != nil { @@ -258,7 +258,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log } tracker.samplesPending.Store(int32(len(streams))) for ingester, samples := range samplesByIngester { - go func(ingester ring.IngesterDesc, samples []*streamTracker) { + go func(ingester ring.InstanceDesc, samples []*streamTracker) { // Use a background context to make sure all ingesters get samples even if we return early localCtx, cancel := context.WithTimeout(context.Background(), d.clientCfg.RemoteTimeout) defer cancel() @@ -280,7 +280,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log } // TODO taken from Cortex, see if we can refactor out an usable interface. -func (d *Distributor) sendSamples(ctx context.Context, ingester ring.IngesterDesc, streamTrackers []*streamTracker, pushTracker *pushTracker) { +func (d *Distributor) sendSamples(ctx context.Context, ingester ring.InstanceDesc, streamTrackers []*streamTracker, pushTracker *pushTracker) { err := d.sendSamplesErr(ctx, ingester, streamTrackers) // If we succeed, decrement each sample's pending count by one. If we reach @@ -312,7 +312,7 @@ func (d *Distributor) sendSamples(ctx context.Context, ingester ring.IngesterDes } // TODO taken from Cortex, see if we can refactor out an usable interface. -func (d *Distributor) sendSamplesErr(ctx context.Context, ingester ring.IngesterDesc, streams []*streamTracker) error { +func (d *Distributor) sendSamplesErr(ctx context.Context, ingester ring.InstanceDesc, streams []*streamTracker) error { c, err := d.pool.GetClientFor(ingester.Addr) if err != nil { return err diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 05b09885bf8a..5903c701e057 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -294,7 +294,7 @@ func prepare(t *testing.T, limits *validation.Limits, kvStore kv.Client, factory replicationFactor: 3, } for addr := range ingesters { - ingestersRing.ingesters = append(ingestersRing.ingesters, ring.IngesterDesc{ + ingestersRing.ingesters = append(ingestersRing.ingesters, ring.InstanceDesc{ Addr: addr, }) } @@ -363,11 +363,11 @@ func (i *mockIngester) Close() error { // ingesters. 
type mockRing struct { prometheus.Counter - ingesters []ring.IngesterDesc + ingesters []ring.InstanceDesc replicationFactor uint32 } -func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.IngesterDesc, _ []string, _ []string) (ring.ReplicationSet, error) { +func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) { result := ring.ReplicationSet{ MaxErrors: 1, Ingesters: buf[:0], diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go index 4f8cdd355d07..661a74c9c492 100644 --- a/pkg/distributor/http.go +++ b/pkg/distributor/http.go @@ -62,7 +62,7 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) { func ParseRequest(r *http.Request) (*logproto.PushRequest, error) { userID, _ := user.ExtractOrgID(r.Context()) - logger := util_log.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) body := lokiutil.NewSizeReader(r.Body) contentType := r.Header.Get(contentType) var req logproto.PushRequest diff --git a/pkg/helpers/logerror.go b/pkg/helpers/logerror.go index b6c723ee4e7e..7fc90291da1e 100644 --- a/pkg/helpers/logerror.go +++ b/pkg/helpers/logerror.go @@ -3,7 +3,6 @@ package helpers import ( "context" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" ) @@ -11,13 +10,13 @@ import ( // LogError logs any error returned by f; useful when deferring Close etc. func LogError(message string, f func() error) { if err := f(); err != nil { - level.Error(util.Logger).Log("message", message, "error", err) + level.Error(util_log.Logger).Log("message", message, "error", err) } } // LogError logs any error returned by f; useful when deferring Close etc. func LogErrorWithContext(ctx context.Context, message string, f func() error) { if err := f(); err != nil { - level.Error(util_log.WithContext(ctx, util.Logger)).Log("message", message, "error", err) + level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("message", message, "error", err) } } diff --git a/pkg/ingester/checkpoint.go b/pkg/ingester/checkpoint.go index cc50263e3252..cebb90a1a6c2 100644 --- a/pkg/ingester/checkpoint.go +++ b/pkg/ingester/checkpoint.go @@ -11,7 +11,7 @@ import ( "time" "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/dustin/go-humanize" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -329,13 +329,13 @@ func (w *WALCheckpointWriter) Advance() (bool, error) { // Checkpoint is named after the last WAL segment present so that when replaying the WAL // we can start from that particular WAL segment. 
checkpointDir := filepath.Join(w.segmentWAL.Dir(), fmt.Sprintf(checkpointPrefix+"%06d", lastSegment)) - level.Info(util.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir) + level.Info(util_log.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir) checkpointDirTemp := checkpointDir + ".tmp" // cleanup any old partial checkpoints if _, err := os.Stat(checkpointDirTemp); err == nil { if err := os.RemoveAll(checkpointDirTemp); err != nil { - level.Error(util.Logger).Log("msg", "unable to cleanup old tmp checkpoint", "dir", checkpointDirTemp) + level.Error(util_log.Logger).Log("msg", "unable to cleanup old tmp checkpoint", "dir", checkpointDirTemp) return false, err } } @@ -344,7 +344,7 @@ func (w *WALCheckpointWriter) Advance() (bool, error) { return false, errors.Wrap(err, "create checkpoint dir") } - checkpoint, err := wal.NewSize(log.With(util.Logger, "component", "checkpoint_wal"), nil, checkpointDirTemp, walSegmentSize, false) + checkpoint, err := wal.NewSize(log.With(util_log.Logger, "component", "checkpoint_wal"), nil, checkpointDirTemp, walSegmentSize, false) if err != nil { return false, errors.Wrap(err, "open checkpoint") } @@ -370,7 +370,7 @@ func (w *WALCheckpointWriter) Write(s *Series) error { w.recs = append(w.recs, b) w.bufSize += len(b) - level.Debug(util.Logger).Log("msg", "writing series", "size", humanize.Bytes(uint64(len(b)))) + level.Debug(util_log.Logger).Log("msg", "writing series", "size", humanize.Bytes(uint64(len(b)))) // 1MB if w.bufSize > 1<<20 { @@ -382,7 +382,7 @@ func (w *WALCheckpointWriter) Write(s *Series) error { } func (w *WALCheckpointWriter) flush() error { - level.Debug(util.Logger).Log("msg", "flushing series", "totalSize", humanize.Bytes(uint64(w.bufSize)), "series", len(w.recs)) + level.Debug(util_log.Logger).Log("msg", "flushing series", "totalSize", humanize.Bytes(uint64(w.bufSize)), "series", len(w.recs)) if err := w.checkpointWAL.Log(w.recs...); err != nil { return err } @@ -491,21 +491,21 @@ func (w *WALCheckpointWriter) Close(abort bool) error { if err := fileutil.Replace(w.checkpointWAL.Dir(), w.final); err != nil { return errors.Wrap(err, "rename checkpoint directory") } - level.Info(util.Logger).Log("msg", "atomic checkpoint finished", "old", w.checkpointWAL.Dir(), "new", w.final) + level.Info(util_log.Logger).Log("msg", "atomic checkpoint finished", "old", w.checkpointWAL.Dir(), "new", w.final) // We delete the WAL segments which are before the previous checkpoint and not before the // current checkpoint created. This is because if the latest checkpoint is corrupted for any reason, we // should be able to recover from the older checkpoint which would need the older WAL segments. if err := w.segmentWAL.Truncate(w.lastSegment + 1); err != nil { // It is fine to have old WAL segments hanging around if deletion failed. // We can try again next time. - level.Error(util.Logger).Log("msg", "error deleting old WAL segments", "err", err, "lastSegment", w.lastSegment) + level.Error(util_log.Logger).Log("msg", "error deleting old WAL segments", "err", err, "lastSegment", w.lastSegment) } if w.lastSegment >= 0 { if err := w.deleteCheckpoints(w.lastSegment); err != nil { // It is fine to have old checkpoints hanging around if deletion failed. // We can try again next time. 
- level.Error(util.Logger).Log("msg", "error deleting old checkpoint", "err", err) + level.Error(util_log.Logger).Log("msg", "error deleting old checkpoint", "err", err) } } @@ -562,7 +562,7 @@ func (c *Checkpointer) PerformCheckpoint() (err error) { start := time.Now() defer func() { elapsed := time.Since(start) - level.Info(util.Logger).Log("msg", "checkpoint done", "time", elapsed.String()) + level.Info(util_log.Logger).Log("msg", "checkpoint done", "time", elapsed.String()) c.metrics.checkpointDuration.Observe(elapsed.Seconds()) }() @@ -604,9 +604,9 @@ func (c *Checkpointer) Run() { for { select { case <-ticker.C: - level.Info(util.Logger).Log("msg", "starting checkpoint") + level.Info(util_log.Logger).Log("msg", "starting checkpoint") if err := c.PerformCheckpoint(); err != nil { - level.Error(util.Logger).Log("msg", "error checkpointing series", "err", err) + level.Error(util_log.Logger).Log("msg", "error checkpointing series", "err", err) continue } case <-c.quit: diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go index 3a9fc7041077..b3f207119f85 100644 --- a/pkg/ingester/flush.go +++ b/pkg/ingester/flush.go @@ -201,7 +201,7 @@ func (i *Ingester) sweepStream(instance *instance, stream *stream, immediate boo func (i *Ingester) flushLoop(j int) { defer func() { - level.Debug(util.Logger).Log("msg", "Ingester.flushLoop() exited") + level.Debug(util_log.Logger).Log("msg", "Ingester.flushLoop() exited") i.flushQueuesDone.Done() }() @@ -212,11 +212,11 @@ func (i *Ingester) flushLoop(j int) { } op := o.(*flushOp) - level.Debug(util.Logger).Log("msg", "flushing stream", "userid", op.userID, "fp", op.fp, "immediate", op.immediate) + level.Debug(util_log.Logger).Log("msg", "flushing stream", "userid", op.userID, "fp", op.fp, "immediate", op.immediate) err := i.flushUserSeries(op.userID, op.fp, op.immediate) if err != nil { - level.Error(util_log.WithUserID(op.userID, util.Logger)).Log("msg", "failed to flush user", "err", err) + level.Error(util_log.WithUserID(op.userID, util_log.Logger)).Log("msg", "failed to flush user", "err", err) } // If we're exiting & we failed to flush, put the failed operation diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go index 5918237b6283..66d172f5cbb0 100644 --- a/pkg/ingester/flush_test.go +++ b/pkg/ingester/flush_test.go @@ -37,10 +37,6 @@ const ( samplesPerSeries = 100 ) -func init() { - // util.Logger = log.NewLogfmtLogger(os.Stdout) -} - func TestChunkFlushingIdle(t *testing.T) { cfg := defaultIngesterTestConfig(t) cfg.FlushCheckPeriod = 20 * time.Millisecond diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 4c0d9f56c02d..25047a454931 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -12,6 +12,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" @@ -236,7 +237,7 @@ func (i *Ingester) starting(ctx context.Context) error { start := time.Now() - level.Info(util.Logger).Log("msg", "recovering from checkpoint") + level.Info(util_log.Logger).Log("msg", "recovering from checkpoint") checkpointReader, checkpointCloser, err := newCheckpointReader(i.cfg.WAL.Dir) if err != nil { return err @@ -246,19 +247,19 @@ func (i *Ingester) starting(ctx context.Context) error { checkpointRecoveryErr := RecoverCheckpoint(checkpointReader, recoverer) if 
checkpointRecoveryErr != nil { i.metrics.walCorruptionsTotal.WithLabelValues(walTypeCheckpoint).Inc() - level.Error(util.Logger).Log( + level.Error(util_log.Logger).Log( "msg", `Recovered from checkpoint with errors. Some streams were likely not recovered due to WAL checkpoint file corruptions (or WAL file deletions while Loki is running). No administrator action is needed and data loss is only a possibility if more than (replication factor / 2 + 1) ingesters suffer from this.`, "elapsed", time.Since(start).String(), ) } - level.Info(util.Logger).Log( + level.Info(util_log.Logger).Log( "msg", "recovered WAL checkpoint recovery finished", "elapsed", time.Since(start).String(), "errors", checkpointRecoveryErr != nil, ) - level.Info(util.Logger).Log("msg", "recovering from WAL") + level.Info(util_log.Logger).Log("msg", "recovering from WAL") segmentReader, segmentCloser, err := newWalReader(i.cfg.WAL.Dir, -1) if err != nil { return err @@ -268,13 +269,13 @@ func (i *Ingester) starting(ctx context.Context) error { segmentRecoveryErr := RecoverWAL(segmentReader, recoverer) if segmentRecoveryErr != nil { i.metrics.walCorruptionsTotal.WithLabelValues(walTypeSegment).Inc() - level.Error(util.Logger).Log( + level.Error(util_log.Logger).Log( "msg", "Recovered from WAL segments with errors. Some streams and/or entries were likely not recovered due to WAL segment file corruptions (or WAL file deletions while Loki is running). No administrator action is needed and data loss is only a possibility if more than (replication factor / 2 + 1) ingesters suffer from this.", "elapsed", time.Since(start).String(), ) } - level.Info(util.Logger).Log( + level.Info(util_log.Logger).Log( "msg", "WAL segment recovery finished", "elapsed", time.Since(start).String(), "errors", segmentRecoveryErr != nil, @@ -282,7 +283,7 @@ func (i *Ingester) starting(ctx context.Context) error { elapsed := time.Since(start) i.metrics.walReplayDuration.Set(elapsed.Seconds()) - level.Info(util.Logger).Log("msg", "recovery finished", "time", elapsed.String()) + level.Info(util_log.Logger).Log("msg", "recovery finished", "time", elapsed.String()) } diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index a2f83e4f5b99..6d1b0c7bd755 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -19,8 +19,8 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ingester/index" - "github.com/cortexproject/cortex/pkg/util" cutil "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/grafana/loki/pkg/helpers" "github.com/grafana/loki/pkg/iter" @@ -175,7 +175,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error { if e, ok := err.(*os.PathError); ok && e.Err == syscall.ENOSPC { i.metrics.walDiskFullFailures.Inc() i.flushOnShutdownSwitch.TriggerAnd(func() { - level.Error(util.Logger).Log( + level.Error(util_log.Logger).Log( "msg", "Error writing to WAL, disk full, no further messages will be logged for this error", ) diff --git a/pkg/ingester/mapper.go b/pkg/ingester/mapper.go index ee1feb7b74e3..3e1d99040f93 100644 --- a/pkg/ingester/mapper.go +++ b/pkg/ingester/mapper.go @@ -8,7 +8,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" "go.uber.org/atomic" @@ -104,7 +104,7 @@ func (m *fpMapper) maybeAddMapping(fp 
model.Fingerprint, collidingMetric labels. // A new mapping has to be created. mappedFP = m.nextMappedFP() mappedFPs[ms] = mappedFP - level.Info(util.Logger).Log( + level.Info(util_log.Logger).Log( "msg", "fingerprint collision detected, mapping to new fingerprint", "old_fp", fp, "new_fp", mappedFP, @@ -118,7 +118,7 @@ func (m *fpMapper) maybeAddMapping(fp model.Fingerprint, collidingMetric labels. m.mtx.Lock() m.mappings[fp] = mappedFPs m.mtx.Unlock() - level.Info(util.Logger).Log( + level.Info(util_log.Logger).Log( "msg", "fingerprint collision detected, mapping to new fingerprint", "old_fp", fp, "new_fp", mappedFP, diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go index ef5ef062c96d..9e63f8262876 100644 --- a/pkg/ingester/recovery.go +++ b/pkg/ingester/recovery.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/prometheus/tsdb/record" @@ -70,7 +70,7 @@ func newCheckpointReader(dir string) (WALReader, io.Closer, error) { return nil, nil, err } if idx < 0 { - level.Info(util.Logger).Log("msg", "no checkpoint found, treating as no-op") + level.Info(util_log.Logger).Log("msg", "no checkpoint found, treating as no-op") var reader NoopWALReader return reader, reader, nil } diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go index 73aafd9afe4d..a531cfc24553 100644 --- a/pkg/ingester/stream.go +++ b/pkg/ingester/stream.go @@ -8,7 +8,6 @@ import ( "sync" "time" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -181,7 +180,7 @@ func (s *stream) Push( if err != nil { // This should be an unlikely situation, returning an error up the stack doesn't help much here // so instead log this to help debug the issue if it ever arises. - level.Error(util_log.WithContext(ctx, util.Logger)).Log("msg", "failed to Close chunk", "err", err) + level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "failed to Close chunk", "err", err) } chunk.closed = true diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go index 224ba122966c..2822b0d562bf 100644 --- a/pkg/ingester/tailer.go +++ b/pkg/ingester/tailer.go @@ -6,7 +6,6 @@ import ( "sync" "time" - cortex_util "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/prometheus/pkg/labels" @@ -102,7 +101,7 @@ func (t *tailer) loop() { if err != nil { // Don't log any error due to tail client closing the connection if !util.IsConnCanceled(err) { - level.Error(util_log.WithContext(t.conn.Context(), cortex_util.Logger)).Log("msg", "Error writing to tail client", "err", err) + level.Error(util_log.WithContext(t.conn.Context(), util_log.Logger)).Log("msg", "Error writing to tail client", "err", err) } t.close() return diff --git a/pkg/ingester/transfer.go b/pkg/ingester/transfer.go index 0080872eec9b..545dc769d5fc 100644 --- a/pkg/ingester/transfer.go +++ b/pkg/ingester/transfer.go @@ -37,7 +37,7 @@ var ( // TransferChunks receives all chunks from another ingester. The Ingester // must be in PENDING state or else the call will fail. 
func (i *Ingester) TransferChunks(stream logproto.Ingester_TransferChunksServer) error { - logger := util_log.WithContext(stream.Context(), util.Logger) + logger := util_log.WithContext(stream.Context(), util_log.Logger) // Prevent a shutdown from happening until we've completely finished a handoff // from a leaving ingester. i.shutdownMtx.Lock() @@ -198,7 +198,7 @@ func (i *Ingester) TransferOut(ctx context.Context) error { return nil } - level.Error(util_log.WithContext(ctx, util.Logger)).Log("msg", "transfer failed", "err", err) + level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "transfer failed", "err", err) backoff.Wait() } @@ -206,7 +206,7 @@ func (i *Ingester) TransferOut(ctx context.Context) error { } func (i *Ingester) transferOut(ctx context.Context) error { - logger := util_log.WithContext(ctx, util.Logger) + logger := util_log.WithContext(ctx, util_log.Logger) targetIngester, err := i.findTransferTarget(ctx) if err != nil { return fmt.Errorf("cannot find ingester to transfer chunks to: %v", err) @@ -296,7 +296,7 @@ func (i *Ingester) transferOut(ctx context.Context) error { // findTransferTarget finds an ingester in a PENDING state to use for transferring // chunks to. -func (i *Ingester) findTransferTarget(ctx context.Context) (*ring.IngesterDesc, error) { +func (i *Ingester) findTransferTarget(ctx context.Context) (*ring.InstanceDesc, error) { ringDesc, err := i.lifecycler.KVStore.Get(ctx, ring.IngesterRingKey) if err != nil { return nil, err diff --git a/pkg/ingester/transfer_test.go b/pkg/ingester/transfer_test.go index 0d33bdef87a9..28ec897282b0 100644 --- a/pkg/ingester/transfer_test.go +++ b/pkg/ingester/transfer_test.go @@ -10,7 +10,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log/level" @@ -204,7 +204,7 @@ func (c *testIngesterClient) TransferChunks(context.Context, ...grpc.CallOption) c.i.stopIncomingRequests() // used to be called from lifecycler, now it must be called *before* stopping lifecyler. 
(ingester does this on shutdown) err := services.StopAndAwaitTerminated(context.Background(), c.i.lifecycler) if err != nil { - level.Error(util.Logger).Log("msg", "lifecycler failed", "err", err) + level.Error(util_log.Logger).Log("msg", "lifecycler failed", "err", err) } }() diff --git a/pkg/ingester/wal.go b/pkg/ingester/wal.go index 59bd6037a156..83d69d3f5ded 100644 --- a/pkg/ingester/wal.go +++ b/pkg/ingester/wal.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -81,7 +81,7 @@ func newWAL(cfg WALConfig, registerer prometheus.Registerer, metrics *ingesterMe return noopWAL{}, nil } - tsdbWAL, err := wal.NewSize(util.Logger, registerer, cfg.Dir, walSegmentSize, false) + tsdbWAL, err := wal.NewSize(util_log.Logger, registerer, cfg.Dir, walSegmentSize, false) if err != nil { return nil, err } @@ -138,7 +138,7 @@ func (w *walWrapper) Stop() error { close(w.quit) w.wait.Wait() err := w.wal.Close() - level.Info(util.Logger).Log("msg", "stopped", "component", "wal") + level.Info(util_log.Logger).Log("msg", "stopped", "component", "wal") return err } @@ -150,7 +150,7 @@ func (w *walWrapper) checkpointWriter() *WALCheckpointWriter { } func (w *walWrapper) run() { - level.Info(util.Logger).Log("msg", "started", "component", "wal") + level.Info(util_log.Logger).Log("msg", "started", "component", "wal") defer w.wait.Done() checkpointer := NewCheckpointer( diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go index 43added3f72a..a3403c4b0298 100644 --- a/pkg/logcli/query/query.go +++ b/pkg/logcli/query/query.go @@ -13,7 +13,7 @@ import ( "time" cortex_storage "github.com/cortexproject/cortex/pkg/chunk/storage" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/fatih/color" json "github.com/json-iterator/go" "github.com/prometheus/client_golang/prometheus" @@ -182,7 +182,7 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string return err } - if err := conf.Validate(util.Logger); err != nil { + if err := conf.Validate(util_log.Logger); err != nil { return err } @@ -191,7 +191,7 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string return err } - chunkStore, err := cortex_storage.NewStore(conf.StorageConfig.Config, conf.ChunkStoreConfig, conf.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, util.Logger) + chunkStore, err := cortex_storage.NewStore(conf.StorageConfig.Config, conf.ChunkStoreConfig, conf.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, util_log.Logger) if err != nil { return err } diff --git a/pkg/logentry/stages/drop_test.go b/pkg/logentry/stages/drop_test.go index b82066272013..5e2f3b9e4649 100644 --- a/pkg/logentry/stages/drop_test.go +++ b/pkg/logentry/stages/drop_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" @@ -39,7 +39,7 @@ func Test_dropStage_Process(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util.InitLogger(cfg) + util_log.InitLogger(cfg) Debug = true tests := []struct { @@ -265,7 +265,7 @@ func 
Test_dropStage_Process(t *testing.T) { if err != nil { t.Error(err) } - m, err := newDropStage(util.Logger, tt.config, prometheus.DefaultRegisterer) + m, err := newDropStage(util_log.Logger, tt.config, prometheus.DefaultRegisterer) require.NoError(t, err) out := processEntries(m, newEntry(tt.extracted, tt.labels, tt.entry, tt.t)) if tt.shouldDrop { @@ -285,7 +285,7 @@ func ptrFromString(str string) *string { func TestDropPipeline(t *testing.T) { registry := prometheus.NewRegistry() plName := "test_pipeline" - pl, err := NewPipeline(util.Logger, loadConfig(testDropYaml), &plName, registry) + pl, err := NewPipeline(util_log.Logger, loadConfig(testDropYaml), &plName, registry) require.NoError(t, err) out := processEntries(pl, newEntry(nil, nil, testMatchLogLineApp1, time.Now()), diff --git a/pkg/logentry/stages/extensions_test.go b/pkg/logentry/stages/extensions_test.go index 1852db978023..b9d66b0b3d9c 100644 --- a/pkg/logentry/stages/extensions_test.go +++ b/pkg/logentry/stages/extensions_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" ) @@ -65,7 +65,7 @@ func TestNewDocker(t *testing.T) { tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() - p, err := NewDocker(util.Logger, prometheus.DefaultRegisterer) + p, err := NewDocker(util_log.Logger, prometheus.DefaultRegisterer) if err != nil { t.Fatalf("failed to create Docker parser: %s", err) } @@ -139,7 +139,7 @@ func TestNewCri(t *testing.T) { tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() - p, err := NewCRI(util.Logger, prometheus.DefaultRegisterer) + p, err := NewCRI(util_log.Logger, prometheus.DefaultRegisterer) if err != nil { t.Fatalf("failed to create CRI parser: %s", err) } diff --git a/pkg/logentry/stages/json_test.go b/pkg/logentry/stages/json_test.go index 8b94215b4b62..91fbdf05859a 100644 --- a/pkg/logentry/stages/json_test.go +++ b/pkg/logentry/stages/json_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" @@ -82,7 +82,7 @@ func TestPipeline_JSON(t *testing.T) { t.Run(testName, func(t *testing.T) { t.Parallel() - pl, err := NewPipeline(util.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) if err != nil { t.Fatal(err) } @@ -355,7 +355,7 @@ func TestJSONParser_Parse(t *testing.T) { tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() - p, err := New(util.Logger, nil, StageTypeJSON, tt.config, nil) + p, err := New(util_log.Logger, nil, StageTypeJSON, tt.config, nil) if err != nil { t.Fatalf("failed to create json parser: %s", err) } diff --git a/pkg/logentry/stages/labeldrop_test.go b/pkg/logentry/stages/labeldrop_test.go index 1e7baeb91178..5111cc34108f 100644 --- a/pkg/logentry/stages/labeldrop_test.go +++ b/pkg/logentry/stages/labeldrop_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -15,7 +15,7 @@ func Test_dropLabelStage_Process(t *testing.T) { // Enable debug logging cfg := 
&ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util.InitLogger(cfg) + util_log.InitLogger(cfg) Debug = true tests := []struct { diff --git a/pkg/logentry/stages/labels_test.go b/pkg/logentry/stages/labels_test.go index 532d16e4be4c..261d32ed3b44 100644 --- a/pkg/logentry/stages/labels_test.go +++ b/pkg/logentry/stages/labels_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -43,7 +43,7 @@ var testLabelsLogLineWithMissingKey = ` ` func TestLabelsPipeline_Labels(t *testing.T) { - pl, err := NewPipeline(util.Logger, loadConfig(testLabelsYaml), nil, prometheus.DefaultRegisterer) + pl, err := NewPipeline(util_log.Logger, loadConfig(testLabelsYaml), nil, prometheus.DefaultRegisterer) if err != nil { t.Fatal(err) } @@ -178,7 +178,7 @@ func TestLabelStage_Process(t *testing.T) { test := test t.Run(name, func(t *testing.T) { t.Parallel() - st, err := newLabelStage(util.Logger, test.config) + st, err := newLabelStage(util_log.Logger, test.config) if err != nil { t.Fatal(err) } diff --git a/pkg/logentry/stages/match_test.go b/pkg/logentry/stages/match_test.go index ee285dbc6523..80eac4061254 100644 --- a/pkg/logentry/stages/match_test.go +++ b/pkg/logentry/stages/match_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" ) @@ -57,7 +57,7 @@ var testMatchLogLineApp2 = ` func TestMatchPipeline(t *testing.T) { registry := prometheus.NewRegistry() plName := "test_pipeline" - pl, err := NewPipeline(util.Logger, loadConfig(testMatchYaml), &plName, registry) + pl, err := NewPipeline(util_log.Logger, loadConfig(testMatchYaml), &plName, registry) if err != nil { t.Fatal(err) } @@ -150,7 +150,7 @@ func TestMatcher(t *testing.T) { tt.action, nil, } - s, err := newMatcherStage(util.Logger, nil, matchConfig, prometheus.DefaultRegisterer) + s, err := newMatcherStage(util_log.Logger, nil, matchConfig, prometheus.DefaultRegisterer) if (err != nil) != tt.wantErr { t.Errorf("withMatcher() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/logentry/stages/metrics_test.go b/pkg/logentry/stages/metrics_test.go index 0505cf99af22..54b2439887fa 100644 --- a/pkg/logentry/stages/metrics_test.go +++ b/pkg/logentry/stages/metrics_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -110,7 +110,7 @@ promtail_custom_total_lines_count{test="app"} 2 func TestMetricsPipeline(t *testing.T) { registry := prometheus.NewRegistry() - pl, err := NewPipeline(util.Logger, loadConfig(testMetricYaml), nil, registry) + pl, err := NewPipeline(util_log.Logger, loadConfig(testMetricYaml), nil, registry) if err != nil { t.Fatal(err) } @@ -169,7 +169,7 @@ promtail_custom_loki_count 1 func TestMetricsWithDropInPipeline(t *testing.T) { registry := prometheus.NewRegistry() - pl, err := NewPipeline(util.Logger, loadConfig(testMetricWithDropYaml), nil, registry) + pl, err := NewPipeline(util_log.Logger, loadConfig(testMetricWithDropYaml), nil, registry) if err != nil { t.Fatal(err) } @@ -260,7 +260,7 @@ func 
TestDefaultIdleDuration(t *testing.T) { }, }, } - ms, err := New(util.Logger, nil, StageTypeMetric, metricsConfig, registry) + ms, err := New(util_log.Logger, nil, StageTypeMetric, metricsConfig, registry) if err != nil { t.Fatalf("failed to create stage with metrics: %v", err) } @@ -358,15 +358,15 @@ func TestMetricStage_Process(t *testing.T) { } registry := prometheus.NewRegistry() - jsonStage, err := New(util.Logger, nil, StageTypeJSON, jsonConfig, registry) + jsonStage, err := New(util_log.Logger, nil, StageTypeJSON, jsonConfig, registry) if err != nil { t.Fatalf("failed to create stage with metrics: %v", err) } - regexStage, err := New(util.Logger, nil, StageTypeRegex, regexConfig, registry) + regexStage, err := New(util_log.Logger, nil, StageTypeRegex, regexConfig, registry) if err != nil { t.Fatalf("failed to create stage with metrics: %v", err) } - metricStage, err := New(util.Logger, nil, StageTypeMetric, metricsConfig, registry) + metricStage, err := New(util_log.Logger, nil, StageTypeMetric, metricsConfig, registry) if err != nil { t.Fatalf("failed to create stage with metrics: %v", err) } diff --git a/pkg/logentry/stages/multiline_test.go b/pkg/logentry/stages/multiline_test.go index 52f872474155..69fb4e66dd36 100644 --- a/pkg/logentry/stages/multiline_test.go +++ b/pkg/logentry/stages/multiline_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ww "github.com/weaveworks/common/server" @@ -19,7 +19,7 @@ func Test_multilineStage_Process(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util.InitLogger(cfg) + util_log.InitLogger(cfg) Debug = true mcfg := &MultilineConfig{Expression: ptrFromString("^START"), MaxWaitTime: ptrFromString("3s")} @@ -28,7 +28,7 @@ func Test_multilineStage_Process(t *testing.T) { stage := &multilineStage{ cfg: mcfg, - logger: util.Logger, + logger: util_log.Logger, } out := processEntries(stage, @@ -51,7 +51,7 @@ func Test_multilineStage_MultiStreams(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util.InitLogger(cfg) + util_log.InitLogger(cfg) Debug = true mcfg := &MultilineConfig{Expression: ptrFromString("^START"), MaxWaitTime: ptrFromString("3s")} @@ -60,7 +60,7 @@ func Test_multilineStage_MultiStreams(t *testing.T) { stage := &multilineStage{ cfg: mcfg, - logger: util.Logger, + logger: util_log.Logger, } out := processEntries(stage, @@ -96,7 +96,7 @@ func Test_multilineStage_MaxWaitTime(t *testing.T) { // Enable debug logging cfg := &ww.Config{} require.Nil(t, cfg.LogLevel.Set("debug")) - util.InitLogger(cfg) + util_log.InitLogger(cfg) Debug = true maxWait := 2 * time.Second @@ -106,7 +106,7 @@ func Test_multilineStage_MaxWaitTime(t *testing.T) { stage := &multilineStage{ cfg: mcfg, - logger: util.Logger, + logger: util_log.Logger, } in := make(chan Entry, 2) diff --git a/pkg/logentry/stages/output_test.go b/pkg/logentry/stages/output_test.go index 760617b0ca67..8175c0a06a9b 100644 --- a/pkg/logentry/stages/output_test.go +++ b/pkg/logentry/stages/output_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -43,7 +43,7 @@ var testOutputLogLineWithMissingKey = ` ` func 
TestPipeline_Output(t *testing.T) { - pl, err := NewPipeline(util.Logger, loadConfig(testOutputYaml), nil, prometheus.DefaultRegisterer) + pl, err := NewPipeline(util_log.Logger, loadConfig(testOutputYaml), nil, prometheus.DefaultRegisterer) if err != nil { t.Fatal(err) } @@ -122,7 +122,7 @@ func TestOutputStage_Process(t *testing.T) { test := test t.Run(name, func(t *testing.T) { t.Parallel() - st, err := newOutputStage(util.Logger, test.config) + st, err := newOutputStage(util_log.Logger, test.config) if err != nil { t.Fatal(err) } diff --git a/pkg/logentry/stages/pipeline_test.go b/pkg/logentry/stages/pipeline_test.go index f3ee08aaa047..68e05dab06bb 100644 --- a/pkg/logentry/stages/pipeline_test.go +++ b/pkg/logentry/stages/pipeline_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -88,7 +88,7 @@ func loadConfig(yml string) PipelineStages { func TestNewPipeline(t *testing.T) { - p, err := NewPipeline(util.Logger, loadConfig(testMultiStageYaml), nil, prometheus.DefaultRegisterer) + p, err := NewPipeline(util_log.Logger, loadConfig(testMultiStageYaml), nil, prometheus.DefaultRegisterer) if err != nil { panic(err) } @@ -200,7 +200,7 @@ func TestPipeline_Process(t *testing.T) { err := yaml.Unmarshal([]byte(tt.config), &config) require.NoError(t, err) - p, err := NewPipeline(util.Logger, config["pipeline_stages"].([]interface{}), nil, prometheus.DefaultRegisterer) + p, err := NewPipeline(util_log.Logger, config["pipeline_stages"].([]interface{}), nil, prometheus.DefaultRegisterer) require.NoError(t, err) out := processEntries(p, newEntry(nil, tt.initialLabels, tt.entry, tt.t))[0] @@ -273,7 +273,7 @@ func TestPipeline_Wrap(t *testing.T) { if err != nil { panic(err) } - p, err := NewPipeline(util.Logger, config["pipeline_stages"].([]interface{}), nil, prometheus.DefaultRegisterer) + p, err := NewPipeline(util_log.Logger, config["pipeline_stages"].([]interface{}), nil, prometheus.DefaultRegisterer) if err != nil { panic(err) } diff --git a/pkg/logentry/stages/regex_test.go b/pkg/logentry/stages/regex_test.go index 8773b4d564c5..5e8c683693fa 100644 --- a/pkg/logentry/stages/regex_test.go +++ b/pkg/logentry/stages/regex_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -101,7 +101,7 @@ func TestPipeline_Regex(t *testing.T) { t.Run(testName, func(t *testing.T) { t.Parallel() - pl, err := NewPipeline(util.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) if err != nil { t.Fatal(err) } @@ -322,7 +322,7 @@ func TestRegexParser_Parse(t *testing.T) { tt := tt t.Run(tName, func(t *testing.T) { t.Parallel() - p, err := New(util.Logger, nil, StageTypeRegex, tt.config, nil) + p, err := New(util_log.Logger, nil, StageTypeRegex, tt.config, nil) if err != nil { t.Fatalf("failed to create regex parser: %s", err) } @@ -361,7 +361,7 @@ func BenchmarkRegexStage(b *testing.B) { } for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { - stage, err := New(util.Logger, nil, StageTypeRegex, bm.config, nil) + stage, err := New(util_log.Logger, nil, StageTypeRegex, 
bm.config, nil) if err != nil { panic(err) } diff --git a/pkg/logentry/stages/replace_test.go b/pkg/logentry/stages/replace_test.go index 4e2176ed82de..eb2253019897 100644 --- a/pkg/logentry/stages/replace_test.go +++ b/pkg/logentry/stages/replace_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" @@ -121,7 +121,7 @@ func TestPipeline_Replace(t *testing.T) { t.Run(testName, func(t *testing.T) { t.Parallel() - pl, err := NewPipeline(util.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) if err != nil { t.Fatal(err) } diff --git a/pkg/logentry/stages/template_test.go b/pkg/logentry/stages/template_test.go index 0c09e90d9175..4049110a71eb 100644 --- a/pkg/logentry/stages/template_test.go +++ b/pkg/logentry/stages/template_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -56,7 +56,7 @@ var testTemplateLogLineWithMissingKey = ` ` func TestPipeline_Template(t *testing.T) { - pl, err := NewPipeline(util.Logger, loadConfig(testTemplateYaml), nil, prometheus.DefaultRegisterer) + pl, err := NewPipeline(util_log.Logger, loadConfig(testTemplateYaml), nil, prometheus.DefaultRegisterer) if err != nil { t.Fatal(err) } @@ -365,7 +365,7 @@ func TestTemplateStage_Process(t *testing.T) { test := test t.Run(name, func(t *testing.T) { t.Parallel() - st, err := newTemplateStage(util.Logger, test.config) + st, err := newTemplateStage(util_log.Logger, test.config) if err != nil { t.Fatal(err) } diff --git a/pkg/logentry/stages/tenant_test.go b/pkg/logentry/stages/tenant_test.go index 0999ec71ad6a..2a65c41f7db4 100644 --- a/pkg/logentry/stages/tenant_test.go +++ b/pkg/logentry/stages/tenant_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -100,7 +100,7 @@ func TestTenantStage_Validation(t *testing.T) { testData := testData t.Run(testName, func(t *testing.T) { - stage, err := newTenantStage(util.Logger, testData.config) + stage, err := newTenantStage(util_log.Logger, testData.config) if testData.expectedErr != nil { assert.EqualError(t, err, *testData.expectedErr) @@ -170,7 +170,7 @@ func TestTenantStage_Process(t *testing.T) { testData := testData t.Run(testName, func(t *testing.T) { - stage, err := newTenantStage(util.Logger, testData.config) + stage, err := newTenantStage(util_log.Logger, testData.config) require.NoError(t, err) // Process and dummy line and ensure nothing has changed except diff --git a/pkg/logentry/stages/timestamp_test.go b/pkg/logentry/stages/timestamp_test.go index 422b8a0f1abd..7e37d1de26ed 100644 --- a/pkg/logentry/stages/timestamp_test.go +++ b/pkg/logentry/stages/timestamp_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/pkg/errors" 
"github.com/prometheus/client_golang/prometheus" @@ -46,7 +46,7 @@ var testTimestampLogLineWithMissingKey = ` ` func TestTimestampPipeline(t *testing.T) { - pl, err := NewPipeline(util.Logger, loadConfig(testTimestampYaml), nil, prometheus.DefaultRegisterer) + pl, err := NewPipeline(util_log.Logger, loadConfig(testTimestampYaml), nil, prometheus.DefaultRegisterer) if err != nil { t.Fatal(err) } @@ -298,7 +298,7 @@ func TestTimestampStage_Process(t *testing.T) { test := test t.Run(name, func(t *testing.T) { t.Parallel() - st, err := newTimestampStage(util.Logger, test.config) + st, err := newTimestampStage(util_log.Logger, test.config) if err != nil { t.Fatal(err) } @@ -439,7 +439,7 @@ func TestTimestampStage_ProcessActionOnFailure(t *testing.T) { // Ensure the test has been correctly set require.Equal(t, len(testData.inputEntries), len(testData.expectedTimestamps)) - s, err := newTimestampStage(util.Logger, testData.config) + s, err := newTimestampStage(util_log.Logger, testData.config) require.NoError(t, err) for i, inputEntry := range testData.inputEntries { diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go index 50489c723552..d992691aeb7d 100644 --- a/pkg/logql/metrics.go +++ b/pkg/logql/metrics.go @@ -5,7 +5,6 @@ import ( "strings" "time" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/dustin/go-humanize" "github.com/go-kit/kit/log/level" @@ -66,7 +65,7 @@ var ( ) func RecordMetrics(ctx context.Context, p Params, status string, stats stats.Result) { - logger := util_log.WithContext(ctx, util.Logger) + logger := util_log.WithContext(ctx, util_log.Logger) queryType, err := QueryType(p.Query()) if err != nil { level.Warn(logger).Log("msg", "error parsing query type", "err", err) diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go index c6cb39336d4b..dd4d42d11a69 100644 --- a/pkg/logql/metrics_test.go +++ b/pkg/logql/metrics_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/opentracing/opentracing-go" "github.com/stretchr/testify/require" @@ -53,7 +53,7 @@ func TestQueryType(t *testing.T) { func TestLogSlowQuery(t *testing.T) { buf := bytes.NewBufferString("") - util.Logger = log.NewLogfmtLogger(buf) + util_log.Logger = log.NewLogfmtLogger(buf) tr, c := jaeger.NewTracer("foo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter()) defer c.Close() opentracing.SetGlobalTracer(tr) @@ -80,5 +80,5 @@ func TestLogSlowQuery(t *testing.T) { sp.Context().(jaeger.SpanContext).SpanID().String(), ), buf.String()) - util.Logger = log.NewNopLogger() + util_log.Logger = log.NewNopLogger() } diff --git a/pkg/logql/sharding.go b/pkg/logql/sharding.go index 4f060d3d4d51..f315000893b6 100644 --- a/pkg/logql/sharding.go +++ b/pkg/logql/sharding.go @@ -7,7 +7,7 @@ import ( "time" "github.com/cortexproject/cortex/pkg/querier/astmapper" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/prometheus/promql" @@ -168,7 +168,7 @@ func (ev DownstreamEvaluator) Downstream(ctx context.Context, queries []Downstre for _, res := range results { if err := stats.JoinResults(ctx, res.Statistics); err != nil { - level.Warn(util.Logger).Log("msg", "unable to merge downstream results", "err", err) + level.Warn(util_log.Logger).Log("msg", "unable to merge downstream results", "err", err) } } 
@@ -241,7 +241,7 @@ func (ev *DownstreamEvaluator) StepEvaluator( for i, res := range results { stepper, err := ResultStepEvaluator(res, params) if err != nil { - level.Warn(util.Logger).Log( + level.Warn(util_log.Logger).Log( "msg", "could not extract StepEvaluator", "err", err, "expr", queries[i].Expr.String(), @@ -306,7 +306,7 @@ func (ev *DownstreamEvaluator) Iterator( for i, res := range results { iter, err := ResultIterator(res, params) if err != nil { - level.Warn(util.Logger).Log( + level.Warn(util_log.Logger).Log( "msg", "could not extract Iterator", "err", err, "expr", queries[i].Expr.String(), diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go index 628fa1895666..893febe8b09d 100644 --- a/pkg/logql/shardmapper.go +++ b/pkg/logql/shardmapper.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/cortexproject/cortex/pkg/querier/astmapper" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -271,7 +271,7 @@ func (m ShardMapper) mapVectorAggregationExpr(expr *vectorAggregationExpr, r *sh default: // this should not be reachable. If an operation is shardable it should // have an optimization listed. - level.Warn(util.Logger).Log( + level.Warn(util_log.Logger).Log( "msg", "unexpected operation which appears shardable, ignoring", "operation", expr.operation, ) diff --git a/pkg/logql/stats/context_test.go b/pkg/logql/stats/context_test.go index 8a688494ff57..b791597772e2 100644 --- a/pkg/logql/stats/context_test.go +++ b/pkg/logql/stats/context_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" jsoniter "github.com/json-iterator/go" "github.com/stretchr/testify/require" ) @@ -28,7 +28,7 @@ func TestSnapshot(t *testing.T) { fakeIngesterQuery(ctx) res := Snapshot(ctx, 2*time.Second) - res.Log(util.Logger) + res.Log(util_log.Logger) expected := Result{ Ingester: Ingester{ TotalChunksMatched: 200, diff --git a/pkg/logql/stats/grpc.go b/pkg/logql/stats/grpc.go index 0dcd8c403c8e..0c9239987b7e 100644 --- a/pkg/logql/stats/grpc.go +++ b/pkg/logql/stats/grpc.go @@ -4,7 +4,6 @@ import ( "context" "sync" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" jsoniter "github.com/json-iterator/go" @@ -48,7 +47,7 @@ func CollectTrailer(ctx context.Context) grpc.CallOption { func SendAsTrailer(ctx context.Context, stream grpc.ServerStream) { trailer, err := encodeTrailer(ctx) if err != nil { - level.Warn(util_log.WithContext(ctx, util.Logger)).Log("msg", "failed to encode trailer", "err", err) + level.Warn(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "failed to encode trailer", "err", err) return } stream.SetTrailer(trailer) @@ -111,7 +110,7 @@ func decodeTrailers(ctx context.Context) Result { } func decodeTrailer(ctx context.Context, meta *metadata.MD) Result { - logger := util_log.WithContext(ctx, util.Logger) + logger := util_log.WithContext(ctx, util_log.Logger) var ingData IngesterData values := meta.Get(ingesterDataKey) if len(values) == 1 { diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 7634809d8ce0..59a97f2e6c08 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -23,6 +23,7 @@ import ( cortex_ruler "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/ruler/rules" 
"github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" @@ -239,8 +240,8 @@ func (t *Loki) Run() error { t.Server.HTTP.Path("/config").HandlerFunc(configHandler(t.cfg, newDefaultConfig())) // Let's listen for events from this manager, and log them. - healthy := func() { level.Info(util.Logger).Log("msg", "Loki started") } - stopped := func() { level.Info(util.Logger).Log("msg", "Loki stopped") } + healthy := func() { level.Info(util_log.Logger).Log("msg", "Loki started") } + stopped := func() { level.Info(util_log.Logger).Log("msg", "Loki stopped") } serviceFailed := func(service services.Service) { // if any service fails, stop entire Loki sm.StopAsync() @@ -249,15 +250,15 @@ func (t *Loki) Run() error { for m, s := range serviceMap { if s == service { if service.FailureCase() == util.ErrStopProcess { - level.Info(util.Logger).Log("msg", "received stop signal via return error", "module", m, "error", service.FailureCase()) + level.Info(util_log.Logger).Log("msg", "received stop signal via return error", "module", m, "error", service.FailureCase()) } else { - level.Error(util.Logger).Log("msg", "module failed", "module", m, "error", service.FailureCase()) + level.Error(util_log.Logger).Log("msg", "module failed", "module", m, "error", service.FailureCase()) } return } } - level.Error(util.Logger).Log("msg", "module failed", "module", "unknown", "error", service.FailureCase()) + level.Error(util_log.Logger).Log("msg", "module failed", "module", "unknown", "error", service.FailureCase()) } sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed)) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 6260148e38ca..2f5449bbe988 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -28,7 +28,6 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" cortex_ruler "github.com/cortexproject/cortex/pkg/ruler" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" @@ -174,8 +173,8 @@ func (t *Loki) initQuerier() (services.Service, error) { // In case someone set scheduler address, we ignore it. 
t.cfg.Worker.SchedulerAddress = "" t.cfg.Worker.MaxConcurrentRequests = t.cfg.Querier.MaxConcurrent - level.Debug(util.Logger).Log("msg", "initializing querier worker", "config", fmt.Sprintf("%+v", t.cfg.Worker)) - worker, err = cortex_querier_worker.NewQuerierWorker(t.cfg.Worker, httpgrpc_server.NewServer(t.Server.HTTPServer.Handler), util.Logger, prometheus.DefaultRegisterer) + level.Debug(util_log.Logger).Log("msg", "initializing querier worker", "config", fmt.Sprintf("%+v", t.cfg.Worker)) + worker, err = cortex_querier_worker.NewQuerierWorker(t.cfg.Worker, httpgrpc_server.NewServer(t.Server.HTTPServer.Handler), util_log.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -250,7 +249,7 @@ func (t *Loki) initTableManager() (services.Service, error) { t.cfg.TableManager.ChunkTables.InactiveReadScale.Enabled || t.cfg.TableManager.IndexTables.InactiveReadScale.Enabled) && t.cfg.StorageConfig.AWSStorageConfig.Metrics.URL == "" { - level.Error(util.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided") + level.Error(util_log.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided") os.Exit(1) } @@ -306,7 +305,7 @@ func (t *Loki) initStore() (_ services.Service, err error) { } } - chunkStore, err := cortex_storage.NewStore(t.cfg.StorageConfig.Config, t.cfg.ChunkStoreConfig, t.cfg.SchemaConfig.SchemaConfig, t.overrides, prometheus.DefaultRegisterer, nil, util.Logger) + chunkStore, err := cortex_storage.NewStore(t.cfg.StorageConfig.Config, t.cfg.ChunkStoreConfig, t.cfg.SchemaConfig.SchemaConfig, t.overrides, prometheus.DefaultRegisterer, nil, util_log.Logger) if err != nil { return } @@ -364,7 +363,7 @@ type disabledShuffleShardingLimits struct{} func (disabledShuffleShardingLimits) MaxQueriersPerUser(userID string) int { return 0 } func (t *Loki) initQueryFrontend() (_ services.Service, err error) { - level.Debug(util.Logger).Log("msg", "initializing query frontend", "config", fmt.Sprintf("%+v", t.cfg.Frontend)) + level.Debug(util_log.Logger).Log("msg", "initializing query frontend", "config", fmt.Sprintf("%+v", t.cfg.Frontend)) roundTripper, frontendV1, _, err := frontend.InitFrontend(frontend.CombinedFrontendConfig{ // Don't set FrontendV2 field to make sure that only frontendV1 can be initialized. 
@@ -372,7 +371,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { FrontendV1: t.cfg.Frontend.FrontendV1, CompressResponses: t.cfg.Frontend.CompressResponses, DownstreamURL: t.cfg.Frontend.DownstreamURL, - }, disabledShuffleShardingLimits{}, t.cfg.Server.GRPCListenPort, util.Logger, prometheus.DefaultRegisterer) + }, disabledShuffleShardingLimits{}, t.cfg.Server.GRPCListenPort, util_log.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -381,13 +380,13 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { frontendv1pb.RegisterFrontendServer(t.Server.GRPC, t.frontend) } - level.Debug(util.Logger).Log("msg", "initializing query range tripperware", + level.Debug(util_log.Logger).Log("msg", "initializing query range tripperware", "config", fmt.Sprintf("%+v", t.cfg.QueryRange), "limits", fmt.Sprintf("%+v", t.cfg.LimitsConfig), ) tripperware, stopper, err := queryrange.NewTripperware( t.cfg.QueryRange, - util.Logger, + util_log.Logger, t.overrides, t.cfg.SchemaConfig.SchemaConfig, t.cfg.Querier.QueryIngestersWithin, @@ -399,7 +398,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { t.stopper = stopper roundTripper = tripperware(roundTripper) - frontendHandler := transport.NewHandler(t.cfg.Frontend.Handler, roundTripper, util.Logger, prometheus.DefaultRegisterer) + frontendHandler := transport.NewHandler(t.cfg.Frontend.Handler, roundTripper, util_log.Logger, prometheus.DefaultRegisterer) if t.cfg.Frontend.CompressResponses { frontendHandler = gziphandler.GzipHandler(frontendHandler) } @@ -464,7 +463,7 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) { // to determine if it's unconfigured. the following check, however, correctly tests this. // Single binary integration tests will break if this ever drifts if t.cfg.Target == All && t.cfg.Ruler.StoreConfig.IsDefaults() { - level.Info(util.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.") + level.Info(util_log.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.") return } @@ -489,7 +488,7 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) { func (t *Loki) initRuler() (_ services.Service, err error) { if t.RulerStorage == nil { - level.Info(util.Logger).Log("msg", "RulerStorage is nil. Not starting the ruler.") + level.Info(util_log.Logger).Log("msg", "RulerStorage is nil. 
Not starting the ruler.") return nil, nil } @@ -506,7 +505,7 @@ func (t *Loki) initRuler() (_ services.Service, err error) { t.cfg.Ruler, engine, prometheus.DefaultRegisterer, - util.Logger, + util_log.Logger, t.RulerStorage, t.overrides, ) @@ -553,7 +552,7 @@ func (t *Loki) initMemberlistKV() (services.Service, error) { ring.GetCodec(), } - t.memberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, util.Logger) + t.memberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, util_log.Logger) return t.memberlistKV, nil } diff --git a/pkg/promtail/client/logger_test.go b/pkg/promtail/client/logger_test.go index 825a2a7bd8fb..7bee8d7a7ecc 100644 --- a/pkg/promtail/client/logger_test.go +++ b/pkg/promtail/client/logger_test.go @@ -5,8 +5,8 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" cortexflag "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -16,10 +16,10 @@ import ( ) func TestNewLogger(t *testing.T) { - _, err := NewLogger(nil, util.Logger, flagext.LabelSet{}, []Config{}...) + _, err := NewLogger(nil, util_log.Logger, flagext.LabelSet{}, []Config{}...) require.Error(t, err) - l, err := NewLogger(nil, util.Logger, flagext.LabelSet{}, []Config{{URL: cortexflag.URLValue{URL: &url.URL{Host: "string"}}}}...) + l, err := NewLogger(nil, util_log.Logger, flagext.LabelSet{}, []Config{{URL: cortexflag.URLValue{URL: &url.URL{Host: "string"}}}}...) require.NoError(t, err) l.Chan() <- api.Entry{Labels: model.LabelSet{"foo": "bar"}, Entry: logproto.Entry{Timestamp: time.Now(), Line: "entry"}} l.Stop() diff --git a/pkg/promtail/client/multi_test.go b/pkg/promtail/client/multi_test.go index 903252203d6e..9d4e0a82d0a0 100644 --- a/pkg/promtail/client/multi_test.go +++ b/pkg/promtail/client/multi_test.go @@ -6,8 +6,8 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -19,7 +19,7 @@ import ( ) func TestNewMulti(t *testing.T) { - _, err := NewMulti(nil, util.Logger, lokiflag.LabelSet{}, []Config{}...) + _, err := NewMulti(nil, util_log.Logger, lokiflag.LabelSet{}, []Config{}...) 
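A note on the pattern in the promtail client tests above: rather than mutating a package global, constructors such as NewLogger and NewMulti now receive the shared logger as an argument. A minimal sketch of that injection style, under the assumption that the same shape applies elsewhere; readerTarget and newReaderTarget below are simplified stand-ins for illustration, not real promtail types:

```go
package main

import (
	util_log "github.com/cortexproject/cortex/pkg/util/log"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

// readerTarget is a hypothetical stand-in for the components in this diff,
// which accept a go-kit log.Logger instead of reaching for a package global.
type readerTarget struct {
	logger log.Logger
}

func newReaderTarget(logger log.Logger) *readerTarget {
	// Attach a component label so downstream log lines identify their origin.
	return &readerTarget{logger: log.With(logger, "component", "reader")}
}

func main() {
	// Production code passes util_log.Logger; tests can pass log.NewNopLogger().
	t := newReaderTarget(util_log.Logger)
	level.Debug(t.logger).Log("msg", "target created")
}
```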
if err == nil { t.Fatal("expected err but got nil") } @@ -38,7 +38,7 @@ func TestNewMulti(t *testing.T) { ExternalLabels: lokiflag.LabelSet{LabelSet: model.LabelSet{"hi": "there"}}, } - clients, err := NewMulti(prometheus.DefaultRegisterer, util.Logger, lokiflag.LabelSet{LabelSet: model.LabelSet{"order": "command"}}, cc1, cc2) + clients, err := NewMulti(prometheus.DefaultRegisterer, util_log.Logger, lokiflag.LabelSet{LabelSet: model.LabelSet{"order": "command"}}, cc1, cc2) if err != nil { t.Fatalf("expected err: nil got:%v", err) } diff --git a/pkg/promtail/positions/positions_test.go b/pkg/promtail/positions/positions_test.go index 0919c62cf912..a3fe6ca9f1ed 100644 --- a/pkg/promtail/positions/positions_test.go +++ b/pkg/promtail/positions/positions_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/stretchr/testify/require" ) @@ -151,7 +151,7 @@ func Test_ReadOnly(t *testing.T) { if err != nil { t.Fatal(err) } - p, err := New(util.Logger, Config{ + p, err := New(util_log.Logger, Config{ SyncPeriod: 20 * time.Nanosecond, PositionsFile: temp, ReadOnly: true, diff --git a/pkg/promtail/promtail.go b/pkg/promtail/promtail.go index 777df8c1cc4d..cb59014914ed 100644 --- a/pkg/promtail/promtail.go +++ b/pkg/promtail/promtail.go @@ -3,7 +3,7 @@ package promtail import ( "sync" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" @@ -48,7 +48,7 @@ func New(cfg config.Config, dryRun bool, opts ...Option) (*Promtail, error) { // Initialize promtail with some defaults and allow the options to override // them. 
promtail := &Promtail{ - logger: util.Logger, + logger: util_log.Logger, reg: prometheus.DefaultRegisterer, } for _, o := range opts { diff --git a/pkg/promtail/promtail_test.go b/pkg/promtail/promtail_test.go index da148086dd8d..d284f4d82142 100644 --- a/pkg/promtail/promtail_test.go +++ b/pkg/promtail/promtail_test.go @@ -16,6 +16,7 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" @@ -46,7 +47,7 @@ func TestPromtail(t *testing.T) { w := log.NewSyncWriter(os.Stderr) logger := log.NewLogfmtLogger(w) logger = level.NewFilter(logger, level.AllowInfo()) - util.Logger = logger + util_log.Logger = logger initRandom() dirName := "/tmp/promtail_test_" + randName() @@ -428,7 +429,7 @@ func waitForEntries(timeoutSec int, handler *testServerHandler, expectedCounts m if rcvd, ok := handler.receivedMap[file]; !ok || len(rcvd) != expectedCount { waiting = waiting + " " + file for _, e := range rcvd { - level.Info(util.Logger).Log("file", file, "entry", e.Line) + level.Info(util_log.Logger).Log("file", file, "entry", e.Line) } } } diff --git a/pkg/promtail/targets/lokipush/pushtarget.go b/pkg/promtail/targets/lokipush/pushtarget.go index fce793dcebe5..9c3ea6d936ca 100644 --- a/pkg/promtail/targets/lokipush/pushtarget.go +++ b/pkg/promtail/targets/lokipush/pushtarget.go @@ -6,7 +6,7 @@ import ( "strings" "time" - cortex_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/imdario/mergo" @@ -81,7 +81,7 @@ func (t *PushTarget) run() error { // We don't want the /debug and /metrics endpoints running t.config.Server.RegisterInstrumentation = false - cortex_util.InitLogger(&t.config.Server) + util_log.InitLogger(&t.config.Server) srv, err := server.New(t.config.Server) if err != nil { diff --git a/pkg/promtail/targets/stdin/stdin_target_manager_test.go b/pkg/promtail/targets/stdin/stdin_target_manager_test.go index b23e33ca7546..9dd05aa05af1 100644 --- a/pkg/promtail/targets/stdin/stdin_target_manager_test.go +++ b/pkg/promtail/targets/stdin/stdin_target_manager_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -80,7 +80,7 @@ func Test_newReaderTarget(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := fake.New(func() {}) - got, err := newReaderTarget(prometheus.DefaultRegisterer, util.Logger, tt.in, c, tt.cfg) + got, err := newReaderTarget(prometheus.DefaultRegisterer, util_log.Logger, tt.in, c, tt.cfg) if (err != nil) != tt.wantErr { t.Errorf("newReaderTarget() error = %v, wantErr %v", err, tt.wantErr) return @@ -120,7 +120,7 @@ func Test_Shutdown(t *testing.T) { stdIn = newFakeStdin("line") appMock := &mockShutdownable{called: make(chan bool, 1)} recorder := fake.New(func() {}) - manager, err := NewStdinTargetManager(prometheus.DefaultRegisterer, util.Logger, appMock, recorder, []scrapeconfig.Config{{}}) + manager, err := NewStdinTargetManager(prometheus.DefaultRegisterer, util_log.Logger, appMock, recorder, []scrapeconfig.Config{{}}) require.NoError(t, err) require.NotNil(t, manager) require.Equal(t, true, <-appMock.called) 
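The promtail test above shows the other half of the migration: the global logger now lives in util_log, so test setup assigns a level-filtered logfmt logger to util_log.Logger instead of util.Logger. A small standalone sketch of that setup, using only the go-kit calls already visible in the surrounding context:

```go
package main

import (
	"os"

	util_log "github.com/cortexproject/cortex/pkg/util/log"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	// Same shape as the test: build a logfmt logger on stderr,
	// filter it to info and above, and install it as the shared global.
	w := log.NewSyncWriter(os.Stderr)
	logger := log.NewLogfmtLogger(w)
	logger = level.NewFilter(logger, level.AllowInfo())
	util_log.Logger = logger

	level.Info(util_log.Logger).Log("msg", "this line is emitted")
	level.Debug(util_log.Logger).Log("msg", "this line is filtered out")
}
```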
@@ -140,12 +140,12 @@ func compareEntries(t *testing.T, expected, actual []api.Entry) { func Test_StdinConfigs(t *testing.T) { // should take the first config - require.Equal(t, scrapeconfig.DefaultScrapeConfig, getStdinConfig(util.Logger, []scrapeconfig.Config{ + require.Equal(t, scrapeconfig.DefaultScrapeConfig, getStdinConfig(util_log.Logger, []scrapeconfig.Config{ scrapeconfig.DefaultScrapeConfig, {}, })) // or use the default if none if provided - require.Equal(t, defaultStdInCfg, getStdinConfig(util.Logger, []scrapeconfig.Config{})) + require.Equal(t, defaultStdInCfg, getStdinConfig(util_log.Logger, []scrapeconfig.Config{})) } var stagesConfig = ` diff --git a/pkg/promtail/targets/windows/target.go b/pkg/promtail/targets/windows/target.go index 30734ef977c6..0048a378d758 100755 --- a/pkg/promtail/targets/windows/target.go +++ b/pkg/promtail/targets/windows/target.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" @@ -112,7 +112,7 @@ func (t *Target) loop() { if err != nil { if err != win_eventlog.ERROR_NO_MORE_ITEMS { t.err = err - level.Error(util.Logger).Log("msg", "error fetching events", "err", err) + level.Error(util_log.Logger).Log("msg", "error fetching events", "err", err) } break loop } @@ -122,7 +122,7 @@ func (t *Target) loop() { t.handler.Chan() <- entry if err := t.bm.save(handles[i]); err != nil { t.err = err - level.Error(util.Logger).Log("msg", "error saving bookmark", "err", err) + level.Error(util_log.Logger).Log("msg", "error saving bookmark", "err", err) } } win_eventlog.Close(handles) diff --git a/pkg/promtail/targets/windows/target_test.go b/pkg/promtail/targets/windows/target_test.go index b23dfc5ca816..be857c73141b 100755 --- a/pkg/promtail/targets/windows/target_test.go +++ b/pkg/promtail/targets/windows/target_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" jsoniter "github.com/json-iterator/go" "github.com/prometheus/common/model" "github.com/spf13/afero" @@ -26,7 +26,7 @@ func init() { // Enable debug logging cfg := &server.Config{} _ = cfg.LogLevel.Set("debug") - util.InitLogger(cfg) + util_log.InitLogger(cfg) } // Test that you can use to generate event logs locally. 
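The windows target test init above also shows the config-driven path: util_log.InitLogger derives the global logger from a server config rather than having it assigned directly. A rough sketch of that initialization, assuming the server package is weaveworks/common/server (only the package name, not its import path, appears in this part of the diff):

```go
package main

import (
	util_log "github.com/cortexproject/cortex/pkg/util/log"
	"github.com/go-kit/kit/log/level"
	"github.com/weaveworks/common/server" // assumed import path for server.Config
)

func main() {
	// Mirror the test init(): build a server config, force debug level,
	// then initialize the shared logger from it.
	cfg := &server.Config{}
	_ = cfg.LogLevel.Set("debug")
	util_log.InitLogger(cfg)

	level.Debug(util_log.Logger).Log("msg", "debug logging enabled")
}
```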
@@ -57,7 +57,7 @@ func Test_GetCreateBookrmark(t *testing.T) { } client := fake.New(func() {}) defer client.Stop() - ta, err := New(util.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{ + ta, err := New(util_log.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{ BoorkmarkPath: "c:foo.xml", PollInterval: time.Microsecond, Query: ` @@ -91,7 +91,7 @@ func Test_GetCreateBookrmark(t *testing.T) { client = fake.New(func() {}) defer client.Stop() - ta, err = New(util.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{ + ta, err = New(util_log.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{ BoorkmarkPath: "c:foo.xml", PollInterval: time.Microsecond, Query: ` @@ -120,7 +120,7 @@ func Test_GetCreateBookrmark(t *testing.T) { func Test_renderEntries(t *testing.T) { client := fake.New(func() {}) defer client.Stop() - ta, err := New(util.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{ + ta, err := New(util_log.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{ Labels: model.LabelSet{"job": "windows-events"}, EventlogName: "Application", Query: "*", diff --git a/pkg/promtail/targets/windows/win_eventlog/win_eventlog.go b/pkg/promtail/targets/windows/win_eventlog/win_eventlog.go index 0ccf0fa6ae5a..982e2ace6697 100755 --- a/pkg/promtail/targets/windows/win_eventlog/win_eventlog.go +++ b/pkg/promtail/targets/windows/win_eventlog/win_eventlog.go @@ -32,7 +32,7 @@ import ( "strings" "syscall" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -365,7 +365,7 @@ func EvtSubscribe(logName, xquery string) (EvtHandle, error) { if err != nil { return 0, err } - level.Debug(util.Logger).Log("msg", "Subcribed with handle id", "id", subsHandle) + level.Debug(util_log.Logger).Log("msg", "Subcribed with handle id", "id", subsHandle) return subsHandle, nil } @@ -394,7 +394,7 @@ func EvtSubscribeWithBookmark(logName, xquery string, bookMark EvtHandle) (EvtHa if err != nil { return 0, err } - level.Debug(util.Logger).Log("msg", "Subcribed with handle id", "id", subsHandle) + level.Debug(util_log.Logger).Log("msg", "Subcribed with handle id", "id", subsHandle) return subsHandle, nil } diff --git a/pkg/querier/http.go b/pkg/querier/http.go index b7db2456172a..e265099a67a4 100644 --- a/pkg/querier/http.go +++ b/pkg/querier/http.go @@ -5,7 +5,6 @@ import ( "net/http" "time" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/gorilla/websocket" @@ -199,7 +198,7 @@ func (q *Querier) TailHandler(w http.ResponseWriter, r *http.Request) { upgrader := websocket.Upgrader{ CheckOrigin: func(r *http.Request) bool { return true }, } - logger := util_log.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) req, err := loghttp.ParseTailQuery(r) if err != nil { diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go index b7cf704f22cd..81c7c7de2249 100644 --- a/pkg/querier/ingester_querier.go +++ b/pkg/querier/ingester_querier.go @@ -9,7 +9,7 @@ import ( cortex_distributor "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/ring" ring_client "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" 
"github.com/cortexproject/cortex/pkg/util/services" "github.com/pkg/errors" "github.com/prometheus/common/model" @@ -48,7 +48,7 @@ func NewIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryD func newIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryDelay time.Duration, clientFactory ring_client.PoolFactory) (*IngesterQuerier, error) { iq := IngesterQuerier{ ring: ring, - pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ring, clientFactory, util.Logger), + pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ring, clientFactory, util_log.Logger), extraQueryDelay: extraQueryDelay, } @@ -74,7 +74,7 @@ func (q *IngesterQuerier) forAllIngesters(ctx context.Context, f func(logproto.Q // forGivenIngesters runs f, in parallel, for given ingesters // TODO taken from Cortex, see if we can refactor out an usable interface. func (q *IngesterQuerier) forGivenIngesters(ctx context.Context, replicationSet ring.ReplicationSet, f func(logproto.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) { - results, err := replicationSet.Do(ctx, q.extraQueryDelay, func(ctx context.Context, ingester *ring.IngesterDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, q.extraQueryDelay, func(ctx context.Context, ingester *ring.InstanceDesc) (interface{}, error) { client, err := q.pool.GetClientFor(ingester.Addr) if err != nil { return nil, err @@ -175,7 +175,7 @@ func (q *IngesterQuerier) TailDisconnectedIngesters(ctx context.Context, req *lo } // Look for disconnected ingesters or new one we should (re)connect to - reconnectIngesters := []ring.IngesterDesc{} + reconnectIngesters := []ring.InstanceDesc{} for _, ingester := range replicationSet.Ingesters { if _, ok := connected[ingester.Addr]; ok { @@ -232,7 +232,7 @@ func (q *IngesterQuerier) TailersCount(ctx context.Context) ([]uint32, error) { } // we want to check count of active tailers with only active ingesters - ingesters := make([]ring.IngesterDesc, 0, 1) + ingesters := make([]ring.InstanceDesc, 0, 1) for i := range replicationSet.Ingesters { if replicationSet.Ingesters[i].State == ring.ACTIVE { ingesters = append(ingesters, replicationSet.Ingesters[i]) diff --git a/pkg/querier/ingester_querier_test.go b/pkg/querier/ingester_querier_test.go index 5800e225cb30..0c52a26cea49 100644 --- a/pkg/querier/ingester_querier_test.go +++ b/pkg/querier/ingester_querier_test.go @@ -19,47 +19,47 @@ func TestQuerier_tailDisconnectedIngesters(t *testing.T) { tests := map[string]struct { connectedIngestersAddr []string - ringIngesters []ring.IngesterDesc + ringIngesters []ring.InstanceDesc expectedClientsAddr []string }{ "no connected ingesters and empty ring": { connectedIngestersAddr: []string{}, - ringIngesters: []ring.IngesterDesc{}, + ringIngesters: []ring.InstanceDesc{}, expectedClientsAddr: []string{}, }, "no connected ingesters and ring containing new ingesters": { connectedIngestersAddr: []string{}, - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)}, expectedClientsAddr: []string{"1.1.1.1"}, }, "connected ingesters and ring contain the same ingesters": { connectedIngestersAddr: []string{"1.1.1.1", "2.2.2.2"}, - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("2.2.2.2", ring.ACTIVE), mockIngesterDesc("1.1.1.1", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("2.2.2.2", ring.ACTIVE), mockInstanceDesc("1.1.1.1", ring.ACTIVE)}, expectedClientsAddr: []string{}, 
}, "ring contains new ingesters compared to the connected one": { connectedIngestersAddr: []string{"1.1.1.1"}, - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("2.2.2.2", ring.ACTIVE), mockIngesterDesc("3.3.3.3", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("2.2.2.2", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)}, expectedClientsAddr: []string{"2.2.2.2", "3.3.3.3"}, }, "connected ingesters contain ingesters not in the ring anymore": { connectedIngestersAddr: []string{"1.1.1.1", "2.2.2.2", "3.3.3.3"}, - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("3.3.3.3", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)}, expectedClientsAddr: []string{}, }, "connected ingesters contain ingesters not in the ring anymore and the ring contains new ingesters too": { connectedIngestersAddr: []string{"1.1.1.1", "2.2.2.2", "3.3.3.3"}, - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("3.3.3.3", ring.ACTIVE), mockIngesterDesc("4.4.4.4", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE), mockInstanceDesc("4.4.4.4", ring.ACTIVE)}, expectedClientsAddr: []string{"4.4.4.4"}, }, "ring contains ingester in LEAVING state not listed in the connected ingesters": { connectedIngestersAddr: []string{"1.1.1.1"}, - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("2.2.2.2", ring.LEAVING)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("2.2.2.2", ring.LEAVING)}, expectedClientsAddr: []string{}, }, "ring contains ingester in PENDING state not listed in the connected ingesters": { connectedIngestersAddr: []string{"1.1.1.1"}, - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("2.2.2.2", ring.PENDING)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("2.2.2.2", ring.PENDING)}, expectedClientsAddr: []string{}, }, } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index 4afe8369615e..6a452c7ab237 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -287,7 +287,7 @@ type readRingMock struct { replicationSet ring.ReplicationSet } -func newReadRingMock(ingesters []ring.IngesterDesc) *readRingMock { +func newReadRingMock(ingesters []ring.InstanceDesc) *readRingMock { return &readRingMock{ replicationSet: ring.ReplicationSet{ Ingesters: ingesters, @@ -302,7 +302,7 @@ func (r *readRingMock) Describe(ch chan<- *prometheus.Desc) { func (r *readRingMock) Collect(ch chan<- prometheus.Metric) { } -func (r *readRingMock) Get(key uint32, op ring.Operation, buf []ring.IngesterDesc, _ []string, _ []string) (ring.ReplicationSet, error) { +func (r *readRingMock) Get(key uint32, op ring.Operation, buf []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) { return r.replicationSet, nil } @@ -352,13 +352,13 @@ func (r *readRingMock) ShuffleShardWithLookback(identifier string, size int, loo } func mockReadRingWithOneActiveIngester() *readRingMock { - return newReadRingMock([]ring.IngesterDesc{ + return newReadRingMock([]ring.InstanceDesc{ {Addr: "test", Timestamp: time.Now().UnixNano(), State: 
ring.ACTIVE, Tokens: []uint32{1, 2, 3}}, }) } -func mockIngesterDesc(addr string, state ring.IngesterState) ring.IngesterDesc { - return ring.IngesterDesc{ +func mockInstanceDesc(addr string, state ring.IngesterState) ring.InstanceDesc { + return ring.InstanceDesc{ Addr: addr, Timestamp: time.Now().UnixNano(), State: state, diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 6fcbd05c3eca..66c654fada96 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -453,31 +453,31 @@ func TestQuerier_concurrentTailLimits(t *testing.T) { t.Parallel() tests := map[string]struct { - ringIngesters []ring.IngesterDesc + ringIngesters []ring.InstanceDesc expectedError error tailersCount uint32 }{ "empty ring": { - ringIngesters: []ring.IngesterDesc{}, + ringIngesters: []ring.InstanceDesc{}, expectedError: httpgrpc.Errorf(http.StatusInternalServerError, "no active ingester found"), }, "ring containing one pending ingester": { - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.PENDING)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.PENDING)}, expectedError: httpgrpc.Errorf(http.StatusInternalServerError, "no active ingester found"), }, "ring containing one active ingester and 0 active tailers": { - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)}, }, "ring containing one active ingester and 1 active tailer": { - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)}, tailersCount: 1, }, "ring containing one pending and active ingester with 1 active tailer": { - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.PENDING), mockIngesterDesc("2.2.2.2", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.PENDING), mockInstanceDesc("2.2.2.2", ring.ACTIVE)}, tailersCount: 1, }, "ring containing one active ingester and max active tailers": { - ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE)}, + ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)}, expectedError: httpgrpc.Errorf(http.StatusBadRequest, "max concurrent tail requests limit exceeded, count > limit (%d > %d)", 6, 5), tailersCount: 5, diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index cbdaaa562d43..0b42ba8f5f49 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -10,7 +10,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/querier/queryrange" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" "github.com/stretchr/testify/require" @@ -62,7 +62,7 @@ func Test_seriesLimiter(t *testing.T) { cfg.SplitQueriesByInterval = time.Hour cfg.CacheResults = false // split in 6 with 4 in // max. 
- tpw, stopper, err := NewTripperware(cfg, util.Logger, fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(cfg, util_log.Logger, fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index aaaafc302ade..d15a8ea1488b 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -16,7 +16,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/querier/queryrange" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -92,7 +92,7 @@ var ( // those tests are mostly for testing the glue between all component and make sure they activate correctly. func TestMetricsTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{maxSeries: math.MaxInt32}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxSeries: math.MaxInt32}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } @@ -156,7 +156,7 @@ func TestMetricsTripperware(t *testing.T) { func TestLogFilterTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } @@ -204,7 +204,7 @@ func TestLogFilterTripperware(t *testing.T) { func TestSeriesTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } @@ -246,7 +246,7 @@ func TestSeriesTripperware(t *testing.T) { func TestLabelsTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } @@ -292,7 +292,7 @@ func TestLabelsTripperware(t *testing.T) { } func TestLogNoRegex(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } @@ -326,7 +326,7 @@ func TestLogNoRegex(t *testing.T) { } func TestUnhandledPath(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } @@ -350,7 +350,7 @@ func TestUnhandledPath(t *testing.T) { } func TestRegexpParamsSupport(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, 
util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } @@ -429,7 +429,7 @@ func TestPostQueries(t *testing.T) { } func TestEntriesLimitsTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } @@ -460,7 +460,7 @@ func TestEntriesLimitsTripperware(t *testing.T) { } func TestEntriesLimitWithZeroTripperware(t *testing.T) { - tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) + tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil) if stopper != nil { defer stopper.Stop() } diff --git a/pkg/querier/tail.go b/pkg/querier/tail.go index 97ef6119dd05..78dd6e5d9646 100644 --- a/pkg/querier/tail.go +++ b/pkg/querier/tail.go @@ -5,7 +5,6 @@ import ( "sync" "time" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" @@ -82,11 +81,11 @@ func (t *Tailer) loop() { case <-checkConnectionTicker.C: // Try to reconnect dropped ingesters and connect to new ingesters if err := t.checkIngesterConnections(); err != nil { - level.Error(util.Logger).Log("msg", "Error reconnecting to disconnected ingesters", "err", err) + level.Error(util_log.Logger).Log("msg", "Error reconnecting to disconnected ingesters", "err", err) } case <-tailMaxDurationTicker.C: if err := t.close(); err != nil { - level.Error(util.Logger).Log("msg", "Error closing Tailer", "err", err) + level.Error(util_log.Logger).Log("msg", "Error closing Tailer", "err", err) } t.closeErrChan <- errors.New("reached tail max duration limit") return @@ -128,12 +127,12 @@ func (t *Tailer) loop() { if numClients == 0 { // All the connections to ingesters are dropped, try reconnecting or return error if err := t.checkIngesterConnections(); err != nil { - level.Error(util.Logger).Log("msg", "Error reconnecting to ingesters", "err", err) + level.Error(util_log.Logger).Log("msg", "Error reconnecting to ingesters", "err", err) } else { continue } if err := t.close(); err != nil { - level.Error(util.Logger).Log("msg", "Error closing Tailer", "err", err) + level.Error(util_log.Logger).Log("msg", "Error closing Tailer", "err", err) } t.closeErrChan <- errors.New("all ingesters closed the connection") return @@ -199,7 +198,7 @@ func (t *Tailer) readTailClient(addr string, querierTailClient logproto.Querier_ var err error defer t.dropTailClient(addr) - logger := util_log.WithContext(querierTailClient.Context(), util.Logger) + logger := util_log.WithContext(querierTailClient.Context(), util_log.Logger) for { if t.stopped { if err := querierTailClient.CloseSend(); err != nil { diff --git a/pkg/storage/async_store.go b/pkg/storage/async_store.go index 35fc38a08a10..e80bacb53759 100644 --- a/pkg/storage/async_store.go +++ b/pkg/storage/async_store.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - pkg_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/go-kit/kit/log/level" @@ -55,7 +55,7 @@ func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, thro if a.queryIngestersWithin != 0 { // don't query 
ingesters if the query does not overlap with queryIngestersWithin. if !through.After(model.Now().Add(-a.queryIngestersWithin)) { - level.Debug(pkg_util.Logger).Log("msg", "skipping querying ingesters for chunk ids", "query-from", from, "query-through", through) + level.Debug(util_log.Logger).Log("msg", "skipping querying ingesters for chunk ids", "query-from", from, "query-through", through) errs <- nil return } @@ -66,7 +66,7 @@ func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, thro if err == nil { level.Debug(spanLogger).Log("ingester-chunks-count", len(ingesterChunks)) - level.Debug(pkg_util.Logger).Log("msg", "got chunk ids from ingester", "count", len(ingesterChunks)) + level.Debug(util_log.Logger).Log("msg", "got chunk ids from ingester", "count", len(ingesterChunks)) } errs <- err }() @@ -87,7 +87,7 @@ func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, thro func (a *AsyncStore) mergeIngesterAndStoreChunks(userID string, storeChunks [][]chunk.Chunk, fetchers []*chunk.Fetcher, ingesterChunkIDs []string) ([][]chunk.Chunk, []*chunk.Fetcher, error) { ingesterChunkIDs = filterDuplicateChunks(storeChunks, ingesterChunkIDs) - level.Debug(pkg_util.Logger).Log("msg", "post-filtering ingester chunks", "count", len(ingesterChunkIDs)) + level.Debug(util_log.Logger).Log("msg", "post-filtering ingester chunks", "count", len(ingesterChunkIDs)) fetcherToChunksGroupIdx := make(map[*chunk.Fetcher]int, len(fetchers)) diff --git a/pkg/storage/batch.go b/pkg/storage/batch.go index aacbdcf1cd06..00a2bc969072 100644 --- a/pkg/storage/batch.go +++ b/pkg/storage/batch.go @@ -7,7 +7,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/querier/astmapper" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" @@ -664,9 +664,9 @@ func fetchLazyChunks(ctx context.Context, chunks []*LazyChunk) error { } chks, err := fetcher.FetchChunks(ctx, chks, keys) if err != nil { - level.Error(util.Logger).Log("msg", "error fetching chunks", "err", err) + level.Error(util_log.Logger).Log("msg", "error fetching chunks", "err", err) if isInvalidChunkError(err) { - level.Error(util.Logger).Log("msg", "checksum of chunks does not match", "err", chunk.ErrInvalidChecksum) + level.Error(util_log.Logger).Log("msg", "checksum of chunks does not match", "err", chunk.ErrInvalidChecksum) errChan <- nil return } diff --git a/pkg/storage/hack/main.go b/pkg/storage/hack/main.go index 0a086b336812..2b05362ad3ae 100644 --- a/pkg/storage/hack/main.go +++ b/pkg/storage/hack/main.go @@ -18,7 +18,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/local" "github.com/cortexproject/cortex/pkg/chunk/storage" "github.com/cortexproject/cortex/pkg/ingester/client" - cortex_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/grafana/loki/pkg/chunkenc" "github.com/grafana/loki/pkg/logproto" @@ -74,7 +74,7 @@ func getStore() (lstore.Store, error) { &validation.Overrides{}, prometheus.DefaultRegisterer, nil, - cortex_util.Logger, + util_log.Logger, ) if err != nil { return nil, err diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index 132f33de6b4e..5bd9ff441525 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - cortex_util 
"github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/stretchr/testify/assert" @@ -219,7 +219,7 @@ func getLocalStore() Store { chunkStore, err := storage.NewStore( storeConfig.Config, chunk.StoreConfig{}, - schemaConfig.SchemaConfig, limits, nil, nil, cortex_util.Logger) + schemaConfig.SchemaConfig, limits, nil, nil, util_log.Logger) if err != nil { panic(err) @@ -808,7 +808,7 @@ func TestStore_MultipleBoltDBShippersInConfig(t *testing.T) { limits, nil, nil, - cortex_util.Logger, + util_log.Logger, ) require.NoError(t, err) store, err := NewStore(config, schemaConfig, chunkStore, nil) @@ -854,7 +854,7 @@ func TestStore_MultipleBoltDBShippersInConfig(t *testing.T) { limits, nil, nil, - cortex_util.Logger, + util_log.Logger, ) require.NoError(t, err) diff --git a/pkg/storage/stores/shipper/compactor/compactor.go b/pkg/storage/stores/shipper/compactor/compactor.go index 7953af52189f..56c04902a26d 100644 --- a/pkg/storage/stores/shipper/compactor/compactor.go +++ b/pkg/storage/stores/shipper/compactor/compactor.go @@ -12,7 +12,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/storage" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - pkg_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -80,7 +80,7 @@ func (c *Compactor) loop(ctx context.Context) error { runCompaction := func() { err := c.Run(ctx) if err != nil { - level.Error(pkg_util.Logger).Log("msg", "failed to run compaction", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to run compaction", "err", err) } } @@ -126,14 +126,14 @@ func (c *Compactor) Run(ctx context.Context) error { table, err := newTable(ctx, filepath.Join(c.cfg.WorkingDirectory, tableName), c.objectClient) if err != nil { status = statusFailure - level.Error(pkg_util.Logger).Log("msg", "failed to initialize table for compaction", "table", tableName, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to initialize table for compaction", "table", tableName, "err", err) continue } err = table.compact() if err != nil { status = statusFailure - level.Error(pkg_util.Logger).Log("msg", "failed to compact files", "table", tableName, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to compact files", "table", tableName, "err", err) } // check if context was cancelled before going for next table. 
diff --git a/pkg/storage/stores/shipper/compactor/table.go b/pkg/storage/stores/shipper/compactor/table.go index b0c139426098..598bad2f2c50 100644 --- a/pkg/storage/stores/shipper/compactor/table.go +++ b/pkg/storage/stores/shipper/compactor/table.go @@ -10,7 +10,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" util_math "github.com/cortexproject/cortex/pkg/util/math" "github.com/go-kit/kit/log/level" "go.etcd.io/bbolt" @@ -68,17 +68,17 @@ func (t *table) compact() error { return err } - level.Info(util.Logger).Log("msg", "listed files", "count", len(objects)) + level.Info(util_log.Logger).Log("msg", "listed files", "count", len(objects)) if len(objects) < compactMinDBs { - level.Info(util.Logger).Log("msg", fmt.Sprintf("skipping compaction since we have just %d files in storage", len(objects))) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("skipping compaction since we have just %d files in storage", len(objects))) return nil } defer func() { err := t.cleanup() if err != nil { - level.Error(util.Logger).Log("msg", "failed to cleanup table", "name", t.name) + level.Error(util_log.Logger).Log("msg", "failed to cleanup table", "name", t.name) } }() @@ -87,7 +87,7 @@ func (t *table) compact() error { return err } - level.Info(util.Logger).Log("msg", "starting compaction of dbs") + level.Info(util_log.Logger).Log("msg", "starting compaction of dbs") errChan := make(chan error) readObjectChan := make(chan string) @@ -123,7 +123,7 @@ func (t *table) compact() error { err = t.readFile(downloadAt) if err != nil { - level.Error(util.Logger).Log("msg", "error reading file", "err", err) + level.Error(util_log.Logger).Log("msg", "error reading file", "err", err) return } case <-t.quit: @@ -148,7 +148,7 @@ func (t *table) compact() error { } } - level.Debug(util.Logger).Log("msg", "closing readObjectChan") + level.Debug(util_log.Logger).Log("msg", "closing readObjectChan") close(readObjectChan) }() @@ -175,7 +175,7 @@ func (t *table) compact() error { default: } - level.Info(util.Logger).Log("msg", "finished compacting the dbs") + level.Info(util_log.Logger).Log("msg", "finished compacting the dbs") // upload the compacted db err = t.upload() @@ -219,7 +219,7 @@ func (t *table) writeBatch(batch []indexEntry) error { // readFile reads a boltdb file from a path and writes the index in batched mode to compactedDB func (t *table) readFile(path string) error { - level.Debug(util.Logger).Log("msg", "reading file for compaction", "path", path) + level.Debug(util_log.Logger).Log("msg", "reading file for compaction", "path", path) db, err := shipper_util.SafeOpenBoltdbFile(path) if err != nil { @@ -228,11 +228,11 @@ func (t *table) readFile(path string) error { defer func() { if err := db.Close(); err != nil { - level.Error(util.Logger).Log("msg", "failed to close db", "path", path, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to close db", "path", path, "err", err) } if err = os.Remove(path); err != nil { - level.Error(util.Logger).Log("msg", "failed to remove file", "path", path, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to remove file", "path", path, "err", err) } }() @@ -305,23 +305,23 @@ func (t *table) upload() error { defer func() { if err := compressedDB.Close(); err != nil { - level.Error(util.Logger).Log("msg", "failed to close file", "path", compactedDBPath, "err", err) + 
level.Error(util_log.Logger).Log("msg", "failed to close file", "path", compactedDBPath, "err", err) } if err := os.Remove(compressedDBPath); err != nil { - level.Error(util.Logger).Log("msg", "failed to remove file", "path", compressedDBPath, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to remove file", "path", compressedDBPath, "err", err) } }() objectKey := fmt.Sprintf("%s.gz", shipper_util.BuildObjectKey(t.name, uploaderName, fmt.Sprint(time.Now().Unix()))) - level.Info(util.Logger).Log("msg", "uploading the compacted file", "objectKey", objectKey) + level.Info(util_log.Logger).Log("msg", "uploading the compacted file", "objectKey", objectKey) return t.storageClient.PutObject(t.ctx, objectKey, compressedDB) } // removeObjectsFromStorage deletes objects from storage. func (t *table) removeObjectsFromStorage(objects []chunk.StorageObject) error { - level.Info(util.Logger).Log("msg", "removing source db files from storage", "count", len(objects)) + level.Info(util_log.Logger).Log("msg", "removing source db files from storage", "count", len(objects)) for _, object := range objects { err := t.storageClient.DeleteObject(t.ctx, object.Key) diff --git a/pkg/storage/stores/shipper/downloads/table.go b/pkg/storage/stores/shipper/downloads/table.go index 5fb869bbabb3..87ddb64e02e7 100644 --- a/pkg/storage/stores/shipper/downloads/table.go +++ b/pkg/storage/stores/shipper/downloads/table.go @@ -14,7 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" util_math "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/go-kit/kit/log" @@ -90,7 +90,7 @@ func NewTable(spanCtx context.Context, name, cacheLocation string, storageClient // Using background context to avoid cancellation of download when request times out. // We would anyways need the files for serving next requests. if err := table.init(ctx, log); err != nil { - level.Error(util.Logger).Log("msg", "failed to download table", "name", table.name) + level.Error(util_log.Logger).Log("msg", "failed to download table", "name", table.name) } }() @@ -134,7 +134,7 @@ func LoadTable(ctx context.Context, name, cacheLocation string, storageClient St cancelFunc: func() {}, } - level.Debug(util.Logger).Log("msg", fmt.Sprintf("opening locally present files for table %s", name), "files", fmt.Sprint(filesInfo)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("opening locally present files for table %s", name), "files", fmt.Sprint(filesInfo)) for _, fileInfo := range filesInfo { if fileInfo.IsDir() { @@ -144,14 +144,14 @@ func LoadTable(ctx context.Context, name, cacheLocation string, storageClient St // if we fail to open a boltdb file, lets skip it and let sync operation re-download the file from storage. 
boltdb, err := shipper_util.SafeOpenBoltdbFile(filepath.Join(folderPath, fileInfo.Name())) if err != nil { - level.Error(util.Logger).Log("msg", fmt.Sprintf("failed to open existing boltdb file %s, continuing without it to let the sync operation catch up", filepath.Join(folderPath, fileInfo.Name())), "err", err) + level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to open existing boltdb file %s, continuing without it to let the sync operation catch up", filepath.Join(folderPath, fileInfo.Name())), "err", err) continue } table.dbs[fileInfo.Name()] = boltdb } - level.Debug(util.Logger).Log("msg", fmt.Sprintf("syncing files for table %s", name)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("syncing files for table %s", name)) // sync the table to get new files and remove the deleted ones from storage. err = table.Sync(ctx) if err != nil { @@ -173,12 +173,12 @@ func (t *Table) init(ctx context.Context, spanLogger log.Logger) (err error) { status = statusFailure t.err = err - level.Error(util.Logger).Log("msg", fmt.Sprintf("failed to initialize table %s, cleaning it up", t.name), "err", err) + level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to initialize table %s, cleaning it up", t.name), "err", err) // cleaning up files due to error to avoid returning invalid results. for fileName := range t.dbs { if err := t.cleanupDB(fileName); err != nil { - level.Error(util.Logger).Log("msg", "failed to cleanup partially downloaded file", "filename", fileName, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to cleanup partially downloaded file", "filename", fileName, "err", err) } } } @@ -195,7 +195,7 @@ func (t *Table) init(ctx context.Context, spanLogger log.Logger) (err error) { return } - level.Debug(util.Logger).Log("msg", fmt.Sprintf("list of files to download for period %s: %s", t.name, objects)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("list of files to download for period %s: %s", t.name, objects)) folderPath, err := t.folderPathForTable(true) if err != nil { @@ -252,7 +252,7 @@ func (t *Table) Close() { for name, db := range t.dbs { if err := db.Close(); err != nil { - level.Error(util.Logger).Log("msg", fmt.Sprintf("failed to close file %s for table %s", name, t.name), "err", err) + level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to close file %s for table %s", name, t.name), "err", err) } } @@ -363,14 +363,14 @@ func (t *Table) cleanupDB(fileName string) error { // Sync downloads updated and new files from the storage relevant for the table and removes the deleted ones func (t *Table) Sync(ctx context.Context) error { - level.Debug(util.Logger).Log("msg", fmt.Sprintf("syncing files for table %s", t.name)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("syncing files for table %s", t.name)) toDownload, toDelete, err := t.checkStorageForUpdates(ctx) if err != nil { return err } - level.Debug(util.Logger).Log("msg", fmt.Sprintf("updates for table %s. toDownload: %s, toDelete: %s", t.name, toDownload, toDelete)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("updates for table %s. toDownload: %s, toDelete: %s", t.name, toDownload, toDelete)) for _, storageObject := range toDownload { err = t.downloadFile(ctx, storageObject) @@ -435,7 +435,7 @@ func (t *Table) checkStorageForUpdates(ctx context.Context) (toDownload []chunk. // downloadFile first downloads file to a temp location so that we can close the existing db(if already exists), replace it with new one and then reopen it. 
func (t *Table) downloadFile(ctx context.Context, storageObject chunk.StorageObject) error { - level.Info(util.Logger).Log("msg", fmt.Sprintf("downloading object from storage with key %s", storageObject.Key)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("downloading object from storage with key %s", storageObject.Key)) dbName, err := getDBNameFromObjectKey(storageObject.Key) if err != nil { diff --git a/pkg/storage/stores/shipper/downloads/table_manager.go b/pkg/storage/stores/shipper/downloads/table_manager.go index b8549e2b781d..548306b5c47f 100644 --- a/pkg/storage/stores/shipper/downloads/table_manager.go +++ b/pkg/storage/stores/shipper/downloads/table_manager.go @@ -14,7 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - pkg_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -99,18 +99,18 @@ func (tm *TableManager) loop() { case <-syncTicker.C: err := tm.syncTables(tm.ctx) if err != nil { - level.Error(pkg_util.Logger).Log("msg", "error syncing local boltdb files with storage", "err", err) + level.Error(util_log.Logger).Log("msg", "error syncing local boltdb files with storage", "err", err) } // we need to keep ensuring query readiness to download every days new table which would otherwise be downloaded only during queries. err = tm.ensureQueryReadiness() if err != nil { - level.Error(pkg_util.Logger).Log("msg", "error ensuring query readiness of tables", "err", err) + level.Error(util_log.Logger).Log("msg", "error ensuring query readiness of tables", "err", err) } case <-cacheCleanupTicker.C: err := tm.cleanupCache() if err != nil { - level.Error(pkg_util.Logger).Log("msg", "error cleaning up expired tables", "err", err) + level.Error(util_log.Logger).Log("msg", "error cleaning up expired tables", "err", err) } case <-tm.ctx.Done(): return @@ -157,7 +157,7 @@ func (tm *TableManager) query(ctx context.Context, tableName string, queries []c tm.tablesMtx.Lock() defer tm.tablesMtx.Unlock() - level.Error(pkg_util.Logger).Log("msg", fmt.Sprintf("table %s has some problem, cleaning it up", tableName), "err", table.Err()) + level.Error(util_log.Logger).Log("msg", fmt.Sprintf("table %s has some problem, cleaning it up", tableName), "err", table.Err()) delete(tm.tables, tableName) return table.Err() @@ -179,7 +179,7 @@ func (tm *TableManager) getOrCreateTable(spanCtx context.Context, tableName stri table, ok = tm.tables[tableName] if !ok { // table not found, creating one. 
- level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("downloading all files for table %s", tableName)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("downloading all files for table %s", tableName)) table = NewTable(spanCtx, tableName, tm.cfg.CacheDir, tm.storageClient, tm.boltIndexClient, tm.metrics) tm.tables[tableName] = table @@ -205,7 +205,7 @@ func (tm *TableManager) syncTables(ctx context.Context) error { tm.metrics.tablesSyncOperationTotal.WithLabelValues(status).Inc() }() - level.Info(pkg_util.Logger).Log("msg", "syncing tables") + level.Info(util_log.Logger).Log("msg", "syncing tables") for _, table := range tm.tables { err = table.Sync(ctx) @@ -221,12 +221,12 @@ func (tm *TableManager) cleanupCache() error { tm.tablesMtx.Lock() defer tm.tablesMtx.Unlock() - level.Info(pkg_util.Logger).Log("msg", "cleaning tables cache") + level.Info(util_log.Logger).Log("msg", "cleaning tables cache") for name, table := range tm.tables { lastUsedAt := table.LastUsedAt() if lastUsedAt.Add(tm.cfg.CacheTTL).Before(time.Now()) { - level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("cleaning up expired table %s", name)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("cleaning up expired table %s", name)) err := table.CleanupAllDBs() if err != nil { return err @@ -237,7 +237,7 @@ func (tm *TableManager) cleanupCache() error { // remove the directory where files for the table were downloaded. err = os.RemoveAll(path.Join(tm.cfg.CacheDir, name)) if err != nil { - level.Error(pkg_util.Logger).Log("msg", fmt.Sprintf("failed to remove directory for table %s", name), "err", err) + level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to remove directory for table %s", name), "err", err) } } } @@ -262,7 +262,7 @@ func (tm *TableManager) ensureQueryReadiness() error { return err } - level.Debug(pkg_util.Logger).Log("msg", fmt.Sprintf("list of tables required for query-readiness %s", tableNames)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("list of tables required for query-readiness %s", tableNames)) for _, tableName := range tableNames { tm.tablesMtx.RLock() @@ -274,7 +274,7 @@ func (tm *TableManager) ensureQueryReadiness() error { continue } - level.Info(pkg_util.Logger).Log("msg", "table required for query readiness does not exist locally, downloading it", "table-name", tableName) + level.Info(util_log.Logger).Log("msg", "table required for query readiness does not exist locally, downloading it", "table-name", tableName) // table doesn't exist, download it. 
table, err := LoadTable(tm.ctx, tableName, tm.cfg.CacheDir, tm.storageClient, tm.boltIndexClient, tm.metrics) if err != nil { @@ -340,7 +340,7 @@ func (tm *TableManager) loadLocalTables() error { continue } - level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("loading local table %s", fileInfo.Name())) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading local table %s", fileInfo.Name())) table, err := LoadTable(tm.ctx, fileInfo.Name(), tm.cfg.CacheDir, tm.storageClient, tm.boltIndexClient, tm.metrics) if err != nil { diff --git a/pkg/storage/stores/shipper/shipper_index_client.go b/pkg/storage/stores/shipper/shipper_index_client.go index d2a3882e200c..a9118dfa26e1 100644 --- a/pkg/storage/stores/shipper/shipper_index_client.go +++ b/pkg/storage/stores/shipper/shipper_index_client.go @@ -13,7 +13,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/local" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - pkg_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -96,7 +96,7 @@ func NewShipper(cfg Config, storageClient chunk.ObjectClient, registerer prometh return nil, err } - level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("starting boltdb shipper in %d mode", cfg.Mode)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("starting boltdb shipper in %d mode", cfg.Mode)) return &shipper, nil } diff --git a/pkg/storage/stores/shipper/table_client.go b/pkg/storage/stores/shipper/table_client.go index aa72c41e18ea..6d2f0661e40e 100644 --- a/pkg/storage/stores/shipper/table_client.go +++ b/pkg/storage/stores/shipper/table_client.go @@ -8,7 +8,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/cortexproject/cortex/pkg/chunk" - cortex_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/grafana/loki/pkg/storage/stores/util" ) @@ -54,7 +54,7 @@ func (b *boltDBShipperTableClient) DeleteTable(ctx context.Context, name string) } if len(dirs) != 0 { - level.Error(cortex_util.Logger).Log("msg", fmt.Sprintf("unexpected directories in %s folder, not touching them", name), "directories", fmt.Sprint(dirs)) + level.Error(util_log.Logger).Log("msg", fmt.Sprintf("unexpected directories in %s folder, not touching them", name), "directories", fmt.Sprint(dirs)) } for _, object := range objects { diff --git a/pkg/storage/stores/shipper/uploads/table.go b/pkg/storage/stores/shipper/uploads/table.go index 8520517e8f9f..6d1f9d961232 100644 --- a/pkg/storage/stores/shipper/uploads/table.go +++ b/pkg/storage/stores/shipper/uploads/table.go @@ -16,7 +16,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/local" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "go.etcd.io/bbolt" @@ -116,10 +116,10 @@ func (lt *Table) Snapshot() error { lt.dbSnapshotsMtx.Lock() defer lt.dbSnapshotsMtx.Unlock() - level.Debug(util.Logger).Log("msg", fmt.Sprintf("snapshotting table %s", lt.name)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("snapshotting table %s", lt.name)) for name, db := range lt.dbs { - level.Debug(util.Logger).Log("msg", fmt.Sprintf("checking db %s for snapshot", name)) + 
level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("checking db %s for snapshot", name)) srcWriteCount := 0 err := db.View(func(tx *bbolt.Tx) error { srcWriteCount = db.Stats().TxStats.Write @@ -176,10 +176,10 @@ func (lt *Table) Snapshot() error { snapshot.writesCount = srcWriteCount lt.dbSnapshots[name] = snapshot - level.Debug(util.Logger).Log("msg", fmt.Sprintf("finished snaphotting db %s", name)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("finished snaphotting db %s", name)) } - level.Debug(util.Logger).Log("msg", fmt.Sprintf("finished snapshotting table %s", lt.name)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("finished snapshotting table %s", lt.name)) return nil } @@ -270,7 +270,7 @@ func (lt *Table) Stop() { for name, db := range lt.dbs { if err := db.Close(); err != nil { - level.Error(util.Logger).Log("msg", fmt.Errorf("failed to close file %s for table %s", name, lt.name)) + level.Error(util_log.Logger).Log("msg", fmt.Errorf("failed to close file %s for table %s", name, lt.name)) } } @@ -337,7 +337,7 @@ func (lt *Table) Upload(ctx context.Context, force bool) error { return err } - level.Info(util.Logger).Log("msg", fmt.Sprintf("uploading table %s", lt.name)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("uploading table %s", lt.name)) for name, db := range lt.dbs { // doing string comparison between unix timestamps in string form since they are anyways of same length @@ -364,13 +364,13 @@ func (lt *Table) Upload(ctx context.Context, force bool) error { lt.dbUploadTimeMtx.Unlock() } - level.Info(util.Logger).Log("msg", fmt.Sprintf("finished uploading table %s", lt.name)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("finished uploading table %s", lt.name)) return nil } func (lt *Table) uploadDB(ctx context.Context, name string, db *bbolt.DB) error { - level.Debug(util.Logger).Log("msg", fmt.Sprintf("uploading db %s from table %s", name, lt.name)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("uploading db %s from table %s", name, lt.name)) filePath := path.Join(lt.path, fmt.Sprintf("%s%s", name, tempFileSuffix)) f, err := os.Create(filePath) @@ -380,11 +380,11 @@ func (lt *Table) uploadDB(ctx context.Context, name string, db *bbolt.DB) error defer func() { if err := f.Close(); err != nil { - level.Error(util.Logger).Log("msg", "failed to close temp file", "path", filePath, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to close temp file", "path", filePath, "err", err) } if err := os.Remove(filePath); err != nil { - level.Error(util.Logger).Log("msg", "failed to remove temp file", "path", filePath, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to remove temp file", "path", filePath, "err", err) } }() @@ -422,7 +422,7 @@ func (lt *Table) uploadDB(ctx context.Context, name string, db *bbolt.DB) error // Cleanup removes dbs which are already uploaded and have not been modified for period longer than dbRetainPeriod. // This is to avoid keeping all the files forever in the ingesters. 
func (lt *Table) Cleanup(dbRetainPeriod time.Duration) error { - level.Info(util.Logger).Log("msg", fmt.Sprintf("cleaning up unwanted dbs from table %s", lt.name)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("cleaning up unwanted dbs from table %s", lt.name)) var filesToCleanup []string cutoffTime := time.Now().Add(-dbRetainPeriod) @@ -443,14 +443,14 @@ func (lt *Table) Cleanup(dbRetainPeriod time.Duration) error { lt.dbsMtx.RUnlock() for i := range filesToCleanup { - level.Debug(util.Logger).Log("msg", fmt.Sprintf("removing db %s from table %s", filesToCleanup[i], lt.name)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("removing db %s from table %s", filesToCleanup[i], lt.name)) if err := lt.RemoveDB(filesToCleanup[i]); err != nil { return err } if err := lt.RemoveSnapshotDB(filesToCleanup[i]); err != nil { - level.Error(util.Logger).Log("msg", fmt.Sprintf("failed to remove snapshot db %s", filesToCleanup[i])) + level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to remove snapshot db %s", filesToCleanup[i])) } } @@ -485,7 +485,7 @@ func loadBoltDBsFromDir(dir string) (map[string]*bbolt.DB, error) { // If an ingester is killed abruptly in the middle of an upload operation it could leave out a temp file which holds the snapshot of db for uploading. // Cleaning up those temp files to avoid problems. if err := os.Remove(filepath.Join(dir, fileInfo.Name())); err != nil { - level.Error(util.Logger).Log("msg", "failed to remove temp file", "name", fileInfo.Name(), "err", err) + level.Error(util_log.Logger).Log("msg", "failed to remove temp file", "name", fileInfo.Name(), "err", err) } continue } diff --git a/pkg/storage/stores/shipper/uploads/table_manager.go b/pkg/storage/stores/shipper/uploads/table_manager.go index aff13667fbf5..9860d6b639f6 100644 --- a/pkg/storage/stores/shipper/uploads/table_manager.go +++ b/pkg/storage/stores/shipper/uploads/table_manager.go @@ -14,7 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/local" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - pkg_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -84,7 +84,7 @@ func (tm *TableManager) loop() { } func (tm *TableManager) Stop() { - level.Info(pkg_util.Logger).Log("msg", "stopping table manager") + level.Info(util_log.Logger).Log("msg", "stopping table manager") tm.cancel() tm.wg.Wait() @@ -168,21 +168,21 @@ func (tm *TableManager) uploadTables(ctx context.Context, force bool) { tm.tablesMtx.RLock() defer tm.tablesMtx.RUnlock() - level.Info(pkg_util.Logger).Log("msg", "uploading tables") + level.Info(util_log.Logger).Log("msg", "uploading tables") status := statusSuccess for _, table := range tm.tables { err := table.Snapshot() if err != nil { // we do not want to stop uploading of dbs due to failures in snapshotting them so logging just the error here. - level.Error(pkg_util.Logger).Log("msg", "failed to snapshot table for reads", "table", table.name, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to snapshot table for reads", "table", table.name, "err", err) } err = table.Upload(ctx, force) if err != nil { // continue uploading other tables while skipping cleanup for a failed one. 
status = statusFailure - level.Error(pkg_util.Logger).Log("msg", "failed to upload dbs", "table", table.name, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to upload dbs", "table", table.name, "err", err) continue } @@ -190,7 +190,7 @@ func (tm *TableManager) uploadTables(ctx context.Context, force bool) { err = table.Cleanup(tm.cfg.DBRetainPeriod) if err != nil { // we do not want to stop uploading of dbs due to failures in cleaning them up so logging just the error here. - level.Error(pkg_util.Logger).Log("msg", "failed to cleanup uploaded dbs past their retention period", "table", table.name, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to cleanup uploaded dbs past their retention period", "table", table.name, "err", err) } } @@ -218,7 +218,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) { // since we are moving to keeping files for same table in a folder, if current element is a file we need to move it inside a directory with the same name // i.e file index_123 would be moved to path index_123/index_123. if !fileInfo.IsDir() { - level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("found a legacy file %s, moving it to folder with same name", fileInfo.Name())) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("found a legacy file %s, moving it to folder with same name", fileInfo.Name())) filePath := filepath.Join(tm.cfg.IndexDir, fileInfo.Name()) // create a folder with .temp suffix since we can't create a directory with same name as file. @@ -238,7 +238,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) { } } - level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("loading table %s", fileInfo.Name())) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading table %s", fileInfo.Name())) table, err := LoadTable(filepath.Join(tm.cfg.IndexDir, fileInfo.Name()), tm.cfg.Uploader, tm.storageClient, tm.boltIndexClient) if err != nil { return nil, err @@ -248,7 +248,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) { // if table is nil it means it has no files in it so remove the folder for that table. 
err := os.Remove(filepath.Join(tm.cfg.IndexDir, fileInfo.Name())) if err != nil { - level.Error(pkg_util.Logger).Log("msg", "failed to remove empty table folder", "table", fileInfo.Name(), "err", err) + level.Error(util_log.Logger).Log("msg", "failed to remove empty table folder", "table", fileInfo.Name(), "err", err) } continue } diff --git a/pkg/storage/stores/shipper/util/util.go b/pkg/storage/stores/shipper/util/util.go index 8861043948d1..82c99cf1b147 100644 --- a/pkg/storage/stores/shipper/util/util.go +++ b/pkg/storage/stores/shipper/util/util.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/loki/pkg/chunkenc" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" ) @@ -30,7 +30,7 @@ func GetFileFromStorage(ctx context.Context, storageClient StorageClient, object defer func() { if err := readCloser.Close(); err != nil { - level.Error(util.Logger) + level.Error(util_log.Logger) } }() @@ -52,7 +52,7 @@ func GetFileFromStorage(ctx context.Context, storageClient StorageClient, object return err } - level.Info(util.Logger).Log("msg", fmt.Sprintf("downloaded file %s", objectKey)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("downloaded file %s", objectKey)) return f.Sync() } @@ -82,7 +82,7 @@ func BuildObjectKey(tableName, uploader, dbName string) string { } func CompressFile(src, dest string) error { - level.Info(util.Logger).Log("msg", "compressing the file", "src", src, "dest", dest) + level.Info(util_log.Logger).Log("msg", "compressing the file", "src", src, "dest", dest) uncompressedFile, err := os.Open(src) if err != nil { return err @@ -90,7 +90,7 @@ func CompressFile(src, dest string) error { defer func() { if err := uncompressedFile.Close(); err != nil { - level.Error(util.Logger).Log("msg", "failed to close uncompressed file", "path", src, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to close uncompressed file", "path", src, "err", err) } }() @@ -101,7 +101,7 @@ func CompressFile(src, dest string) error { defer func() { if err := compressedFile.Close(); err != nil { - level.Error(util.Logger).Log("msg", "failed to close compressed file", "path", dest, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to close compressed file", "path", dest, "err", err) } }() diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index dd480d805cf5..ecc0cdf1b36c 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - pkg_util "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" @@ -202,7 +202,7 @@ func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from, refs = append(refs, r) } - cache, err := cache.New(cache.Config{Prefix: "chunks"}, nil, pkg_util.Logger) + cache, err := cache.New(cache.Config{Prefix: "chunks"}, nil, util_log.Logger) if err != nil { panic(err) } diff --git a/pkg/util/config.go b/pkg/util/config.go index 481d16c6ca7e..9b803d9640ea 100644 --- a/pkg/util/config.go +++ b/pkg/util/config.go @@ -5,7 +5,7 @@ import ( "io" "strings" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/version" "gopkg.in/yaml.v2" @@ -22,7 +22,7 @@ func LogConfig(cfg interface{}) error { cfgStr := string(lc) cfgStrs := strings.Split(cfgStr, "\n") 
for i := len(cfgStrs) - 1; i >= 0; i-- { - level.Info(util.Logger).Log("type", "config", "msg", cfgStrs[i]) + level.Info(util_log.Logger).Log("type", "config", "msg", cfgStrs[i]) } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go index 39b3cc7d6718..a93379bc7bba 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go @@ -14,7 +14,6 @@ import ( "time" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/alertmanager/api" "github.com/prometheus/alertmanager/cluster" "github.com/prometheus/alertmanager/config" @@ -80,9 +79,6 @@ type Alertmanager struct { // Further, in upstream AM, this metric is handled using the config coordinator which we don't use // hence we need to generate the metric ourselves. configHashMetric prometheus.Gauge - - activeMtx sync.Mutex - active bool } var ( @@ -102,11 +98,9 @@ func init() { // New creates a new Alertmanager. func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) { am := &Alertmanager{ - cfg: cfg, - logger: log.With(cfg.Logger, "user", cfg.UserID), - stop: make(chan struct{}), - active: false, - activeMtx: sync.Mutex{}, + cfg: cfg, + logger: log.With(cfg.Logger, "user", cfg.UserID), + stop: make(chan struct{}), configHashMetric: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "alertmanager_config_hash", Help: "Hash of the currently loaded alertmanager configuration.", @@ -269,55 +263,10 @@ func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config, rawCfg s go am.dispatcher.Run() go am.inhibitor.Run() - // Ensure the alertmanager is set to active - am.activeMtx.Lock() - am.active = true - am.activeMtx.Unlock() - am.configHashMetric.Set(md5HashAsMetricValue([]byte(rawCfg))) return nil } -// IsActive returns if the alertmanager is currently running -// or is paused -func (am *Alertmanager) IsActive() bool { - am.activeMtx.Lock() - defer am.activeMtx.Unlock() - return am.active -} - -// Pause running jobs in the alertmanager that are able to be restarted and sets -// to inactives -func (am *Alertmanager) Pause() { - // Set to inactive - am.activeMtx.Lock() - am.active = false - am.activeMtx.Unlock() - - // Stop the inhibitor and dispatcher which will be recreated when - // a new config is applied - if am.inhibitor != nil { - am.inhibitor.Stop() - am.inhibitor = nil - } - if am.dispatcher != nil { - am.dispatcher.Stop() - am.dispatcher = nil - } - - // Remove all of the active silences from the alertmanager - silences, _, err := am.silences.Query() - if err != nil { - level.Warn(am.logger).Log("msg", "unable to retrieve silences for removal", "err", err) - } - for _, si := range silences { - err = am.silences.Expire(si.Id) - if err != nil { - level.Warn(am.logger).Log("msg", "unable to remove silence", "err", err, "silence", si.Id) - } - } -} - // Stop stops the Alertmanager. 
func (am *Alertmanager) Stop() { if am.inhibitor != nil { @@ -330,6 +279,10 @@ func (am *Alertmanager) Stop() { am.alerts.Close() close(am.stop) +} + +func (am *Alertmanager) StopAndWait() { + am.Stop() am.wg.Wait() } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go index 2617c58f3c48..0efeebde15ab 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -32,7 +32,7 @@ func writeMessage(w http.ResponseWriter, message string) { }{Message: message}) if err != nil { - level.Error(util.Logger).Log("msg", "unable to serve alertmanager ring page", "err", err) + level.Error(util_log.Logger).Log("msg", "unable to serve alertmanager ring page", "err", err) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go index d500bcafde5b..2d2a3de7a161 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go @@ -154,6 +154,12 @@ func (m *alertmanagerMetrics) addUserRegistry(user string, reg *prometheus.Regis m.regs.AddUserRegistry(user, reg) } +func (m *alertmanagerMetrics) removeUserRegistry(user string) { + // We neeed to go for a soft deletion here, as hard deletion requires + // that _all_ metrics except gauges are per-user. 
+ m.regs.RemoveUserRegistry(user, false) +} + func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.alertsReceived out <- m.alertsInvalid diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go index 0a7bb17c5b09..9fe2d9dea1fd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go @@ -10,8 +10,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -60,7 +60,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go index 02c1d4d733d5..8b00eb79aa22 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go @@ -10,7 +10,7 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alerts" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Object Alert Storage Schema @@ -60,7 +60,7 @@ func (a *AlertStore) getAlertConfig(ctx context.Context, key string) (alerts.Ale return alerts.AlertConfigDesc{}, err } - defer runutil.CloseWithLogOnErr(util.Logger, readCloser, "close alert config reader") + defer runutil.CloseWithLogOnErr(util_log.Logger, readCloser, "close alert config reader") buf, err := ioutil.ReadAll(readCloser) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go index 27f1784eb6e6..b80a508b8d7c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go @@ -4,7 +4,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" ) -func (r *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) { +func (r *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) { // When we initialize the alertmanager instance in the ring we want to start from // a clean situation, so whatever is the state we set it JOINING, while we keep existing // tokens (if any). 
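The vendored Alertmanager change above splits shutdown in two: `Stop` keeps its old job of stopping the inhibitor and dispatcher, closing the alert store and the stop channel, while the new `StopAndWait` additionally waits on the internal WaitGroup, and the multitenant `stopping` path now calls the latter. A minimal, generic sketch of that split follows; the `worker` type is an illustration only and assumes nothing about the real Alertmanager beyond a stop channel and a `sync.WaitGroup`.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// worker mimics the Stop / StopAndWait split: Stop only signals shutdown,
// StopAndWait also blocks until the background goroutine has exited.
type worker struct {
	stop chan struct{}
	wg   sync.WaitGroup
}

func newWorker() *worker {
	w := &worker{stop: make(chan struct{})}
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		for {
			select {
			case <-w.stop:
				return
			case <-time.After(100 * time.Millisecond):
				// periodic work would go here
			}
		}
	}()
	return w
}

func (w *worker) Stop()        { close(w.stop) }
func (w *worker) StopAndWait() { w.Stop(); w.wg.Wait() }

func main() {
	w := newWorker()
	w.StopAndWait()
	fmt.Println("worker fully stopped")
}
```

Keeping the two methods separate lets a caller that holds a lock signal shutdown cheaply and defer the blocking wait to a teardown path that can afford it.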
@@ -24,5 +24,5 @@ func (r *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler func (r *MultitenantAlertmanager) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} func (r *MultitenantAlertmanager) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (r *MultitenantAlertmanager) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) { +func (r *MultitenantAlertmanager) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go index a636122ba15e..d74e5edfcc5c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go @@ -28,6 +28,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -244,11 +245,11 @@ type MultitenantAlertmanager struct { // effect here. fallbackConfig string - // All the organization configurations that we have. Only used for instrumentation. - cfgs map[string]alerts.AlertConfigDesc - alertmanagersMtx sync.Mutex alertmanagers map[string]*Alertmanager + // Stores the current set of configurations we're running in each tenant's Alertmanager. + // Used for comparing configurations as we synchronize them. + cfgs map[string]alerts.AlertConfigDesc logger log.Logger alertmanagerMetrics *alertmanagerMetrics @@ -522,7 +523,7 @@ func (am *MultitenantAlertmanager) loadAndSyncConfigs(ctx context.Context, syncR func (am *MultitenantAlertmanager) stopping(_ error) error { am.alertmanagersMtx.Lock() for _, am := range am.alertmanagers { - am.Stop() + am.StopAndWait() } am.alertmanagersMtx.Unlock() if am.peer != nil { // Tests don't setup any peer. @@ -604,17 +605,16 @@ func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfi am.alertmanagersMtx.Lock() defer am.alertmanagersMtx.Unlock() - for user, userAM := range am.alertmanagers { - if _, exists := cfgs[user]; !exists { - // The user alertmanager is only paused in order to retain the prometheus metrics - // it has reported to its registry. If a new config for this user appears, this structure - // will be reused. 
- level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", user) - userAM.Pause() - delete(am.cfgs, user) - am.multitenantMetrics.lastReloadSuccessful.DeleteLabelValues(user) - am.multitenantMetrics.lastReloadSuccessfulTimestamp.DeleteLabelValues(user) - level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", user) + for userID, userAM := range am.alertmanagers { + if _, exists := cfgs[userID]; !exists { + level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", userID) + userAM.Stop() + delete(am.alertmanagers, userID) + delete(am.cfgs, userID) + am.multitenantMetrics.lastReloadSuccessful.DeleteLabelValues(userID) + am.multitenantMetrics.lastReloadSuccessfulTimestamp.DeleteLabelValues(userID) + am.alertmanagerMetrics.removeUserRegistry(userID) + level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", userID) } } } @@ -622,9 +622,6 @@ func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfi // setConfig applies the given configuration to the alertmanager for `userID`, // creating an alertmanager if it doesn't already exist. func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { - am.alertmanagersMtx.Lock() - existing, hasExisting := am.alertmanagers[cfg.User] - am.alertmanagersMtx.Unlock() var userAmConfig *amconfig.Config var err error var hasTemplateChanges bool @@ -642,6 +639,10 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { level.Debug(am.logger).Log("msg", "setting config", "user", cfg.User) + am.alertmanagersMtx.Lock() + defer am.alertmanagersMtx.Unlock() + existing, hasExisting := am.alertmanagers[cfg.User] + rawCfg := cfg.RawConfig if cfg.RawConfig == "" { if am.fallbackConfig == "" { @@ -694,9 +695,7 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { if err != nil { return err } - am.alertmanagersMtx.Lock() am.alertmanagers[cfg.User] = newAM - am.alertmanagersMtx.Unlock() } else if am.cfgs[cfg.User].RawConfig != cfg.RawConfig || hasTemplateChanges { level.Info(am.logger).Log("msg", "updating new per-tenant alertmanager", "user", cfg.User) // If the config changed, apply the new one. 
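With `Pause`/`IsActive` gone, `syncConfigs` above now tears a tenant down completely when its configuration disappears: the instance is stopped, dropped from the `alertmanagers` and `cfgs` maps, its per-tenant metric series are deleted, and its registry is soft-removed via `removeUserRegistry`; `setConfig` likewise now takes `alertmanagersMtx` once for the whole update instead of locking around each map access. The sketch below shows the removal pattern in isolation, assuming only `client_golang`; `tenantAM` and the metric name are illustrative stand-ins, not the real types.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// tenantAM stands in for a per-tenant Alertmanager instance.
type tenantAM struct{}

func (t *tenantAM) Stop() {}

func main() {
	lastReload := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "config_last_reload_successful", Help: "illustrative gauge"},
		[]string{"user"},
	)

	running := map[string]*tenantAM{"tenant-a": {}, "tenant-b": {}}
	desired := map[string]struct{}{"tenant-a": {}} // tenant-b's config was deleted

	for userID := range running {
		lastReload.WithLabelValues(userID).Set(1)
	}

	for userID, am := range running {
		if _, exists := desired[userID]; exists {
			continue
		}
		// Full teardown rather than the old "pause": stop the instance, drop
		// its state, and delete its per-tenant metric series.
		am.Stop()
		delete(running, userID)
		lastReload.DeleteLabelValues(userID)
	}

	fmt.Println("remaining tenants:", len(running))
}
```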
@@ -714,7 +713,7 @@ func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amco newAM, err := New(&Config{ UserID: userID, DataDir: am.cfg.DataDir, - Logger: util.Logger, + Logger: util_log.Logger, Peer: am.peer, PeerTimeout: am.cfg.Cluster.PeerTimeout, Retention: am.cfg.Retention, @@ -749,11 +748,6 @@ func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Re am.alertmanagersMtx.Unlock() if ok { - if !userAM.IsActive() { - level.Debug(am.logger).Log("msg", "the Alertmanager is not active", "user", userID) - http.Error(w, "the Alertmanager is not configured", http.StatusNotFound) - return - } userAM.mux.ServeHTTP(w, req) return diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index 84564fb4f0d6..d3c67f710542 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -790,7 +790,7 @@ func awsSessionFromURL(awsURL *url.URL) (client.ConfigProvider, error) { } path := strings.TrimPrefix(awsURL.Path, "/") if len(path) > 0 { - level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) + level.Warn(log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } config, err := awscommon.ConfigFromURL(awsURL) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go index fea098c82334..b8aae77f5235 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go @@ -14,7 +14,7 @@ import ( "github.com/weaveworks/common/mtime" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -112,7 +112,7 @@ func (m *metricsData) UpdateTable(ctx context.Context, current chunk.TableDesc, throttleRate := m.throttleRates[expected.Name] usageRate := m.usageRates[expected.Name] - level.Info(util.Logger).Log("msg", "checking write metrics", "table", current.Name, "queueLengths", fmt.Sprint(m.queueLengths), "throttleRate", throttleRate, "usageRate", usageRate) + level.Info(util_log.Logger).Log("msg", "checking write metrics", "table", current.Name, "queueLengths", fmt.Sprint(m.queueLengths), "throttleRate", throttleRate, "usageRate", usageRate) switch { case throttleRate < throttleFractionScaledown*float64(current.ProvisionedWrite) && @@ -170,7 +170,7 @@ func (m *metricsData) UpdateTable(ctx context.Context, current chunk.TableDesc, readUsageRate := m.usageReadRates[expected.Name] readErrorRate := m.readErrorRates[expected.Name] - level.Info(util.Logger).Log("msg", "checking read metrics", "table", current.Name, "errorRate", readErrorRate, "readUsageRate", readUsageRate) + level.Info(util_log.Logger).Log("msg", "checking read metrics", "table", current.Name, "errorRate", readErrorRate, "readUsageRate", readUsageRate) // Read Scaling switch { // the table is at low/minimum capacity and it is being used -> scale up @@ -235,14 +235,14 @@ func scaleDown(tableName string, currentValue, minValue int64, newValue int64, l earliest := lastUpdated[tableName].Add(time.Duration(coolDown) * time.Second) if earliest.After(mtime.Now()) { - level.Info(util.Logger).Log("msg", "deferring "+msg, 
"table", tableName, "till", earliest, "op", operation) + level.Info(util_log.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest, "op", operation) return currentValue } // Reject a change that is less than 20% - AWS rate-limits scale-downs so save // our chances until it makes a bigger difference if newValue > currentValue*4/5 { - level.Info(util.Logger).Log("msg", "rejected de minimis "+msg, "table", tableName, "current", currentValue, "proposed", newValue, "op", operation) + level.Info(util_log.Logger).Log("msg", "rejected de minimis "+msg, "table", tableName, "current", currentValue, "proposed", newValue, "op", operation) return currentValue } @@ -254,12 +254,12 @@ func scaleDown(tableName string, currentValue, minValue int64, newValue int64, l totalUsage += u } if totalUsage < minUsageForScaledown { - level.Info(util.Logger).Log("msg", "rejected low usage "+msg, "table", tableName, "totalUsage", totalUsage, "op", operation) + level.Info(util_log.Logger).Log("msg", "rejected low usage "+msg, "table", tableName, "totalUsage", totalUsage, "op", operation) return currentValue } } - level.Info(util.Logger).Log("msg", msg, "table", tableName, operation, newValue) + level.Info(util_log.Logger).Log("msg", msg, "table", tableName, operation, newValue) lastUpdated[tableName] = mtime.Now() return newValue } @@ -270,12 +270,12 @@ func scaleUp(tableName string, currentValue, maxValue int64, newValue int64, las } earliest := lastUpdated[tableName].Add(time.Duration(coolDown) * time.Second) if !earliest.After(mtime.Now()) && newValue > currentValue { - level.Info(util.Logger).Log("msg", msg, "table", tableName, operation, newValue) + level.Info(util_log.Logger).Log("msg", msg, "table", tableName, operation, newValue) lastUpdated[tableName] = mtime.Now() return newValue } - level.Info(util.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest) + level.Info(util_log.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest) return currentValue } @@ -362,7 +362,7 @@ func promQuery(ctx context.Context, promAPI promV1.API, query string, duration, return nil, err } if wrngs != nil { - level.Warn(util.Logger).Log( + level.Warn(util_log.Logger).Log( "query", query, "start", queryRange.Start, "end", queryRange.End, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go index 864e410dd5c1..a6e09b361498 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go @@ -18,7 +18,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const arnPrefix = "arn:" @@ -234,7 +234,7 @@ func (m *mockDynamoDBClient) QueryPagesWithContext(ctx aws.Context, input *dynam continue } } else { - level.Warn(util.Logger).Log("msg", "unsupported FilterExpression", "expression", *input.FilterExpression) + level.Warn(util_log.Logger).Log("msg", "unsupported FilterExpression", "expression", *input.FilterExpression) } } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go index 1e638f6091f0..2c802b764fab 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go @@ -19,8 +19,8 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/util" - pkgutil "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Config for a StorageClient @@ -109,7 +109,7 @@ func (cfg *Config) session(name string, reg prometheus.Registerer) (*gocql.Sessi cluster.ConnectTimeout = cfg.ConnectTimeout cluster.ReconnectInterval = cfg.ReconnectInterval cluster.NumConns = cfg.NumConnections - cluster.Logger = log.With(pkgutil.Logger, "module", "gocql", "client", name) + cluster.Logger = log.With(util_log.Logger, "module", "gocql", "client", name) cluster.Registerer = prometheus.WrapRegistererWith( prometheus.Labels{"client": name}, reg) if cfg.Retries > 0 { @@ -536,7 +536,7 @@ type noopConvictionPolicy struct{} // Convicted means connections are removed - we don't want that. // Implementats gocql.ConvictionPolicy. func (noopConvictionPolicy) AddFailure(err error, host *gocql.HostInfo) bool { - level.Error(pkgutil.Logger).Log("msg", "Cassandra host failure", "err", err, "host", host.String()) + level.Error(util_log.Logger).Log("msg", "Cassandra host failure", "err", err, "host", host.String()) return false } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 15f566ffbb40..0fc096d3c801 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -540,7 +540,7 @@ func (c *baseStore) lookupEntriesByQueries(ctx context.Context, queries []IndexQ return true }) if err != nil { - level.Error(util_log.WithContext(ctx, util.Logger)).Log("msg", "error querying storage", "err", err) + level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "error querying storage", "err", err) } return entries, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go index 061a9b1c638a..aeb904885409 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go @@ -10,7 +10,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -211,7 +211,7 @@ func (c *Fetcher) processCacheResponse(ctx context.Context, chunks []Chunk, keys missing = append(missing, chunks[i]) i++ } else if chunkKey > keys[j] { - level.Warn(util.Logger).Log("msg", "got chunk from cache we didn't ask for") + level.Warn(util_log.Logger).Log("msg", "got chunk from cache we didn't ask for") j++ } else { requests = append(requests, decodeRequest{ diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go index 6d76abf9e3d0..bbb814badadf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go @@ -17,7 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" chunk_util 
"github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) var ( @@ -94,7 +94,7 @@ func (b *BoltIndexClient) reload() { for name := range b.dbs { if _, err := os.Stat(path.Join(b.cfg.Directory, name)); err != nil && os.IsNotExist(err) { removedDBs = append(removedDBs, name) - level.Debug(util.Logger).Log("msg", "boltdb file got removed", "filename", name) + level.Debug(util_log.Logger).Log("msg", "boltdb file got removed", "filename", name) continue } } @@ -106,7 +106,7 @@ func (b *BoltIndexClient) reload() { for _, name := range removedDBs { if err := b.dbs[name].Close(); err != nil { - level.Error(util.Logger).Log("msg", "failed to close removed boltdb", "filename", name, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to close removed boltdb", "filename", name, "err", err) continue } delete(b.dbs, name) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go index f64776943481..ff9b5e44b2c9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go @@ -14,7 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/util" - pkgUtil "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // FSConfig is the config for a FSObjectClient. @@ -80,7 +80,7 @@ func (f *FSObjectClient) PutObject(_ context.Context, objectKey string, object i return err } - defer runutil.CloseWithLogOnErr(pkgUtil.Logger, fl, "fullPath: %s", fullPath) + defer runutil.CloseWithLogOnErr(util_log.Logger, fl, "fullPath: %s", fullPath) _, err = io.Copy(fl, object) if err != nil { @@ -187,7 +187,7 @@ func (f *FSObjectClient) DeleteObject(ctx context.Context, objectKey string) err func (f *FSObjectClient) DeleteChunksBefore(ctx context.Context, ts time.Time) error { return filepath.Walk(f.cfg.Directory, func(path string, info os.FileInfo, err error) error { if !info.IsDir() && info.ModTime().Before(ts) { - level.Info(pkgUtil.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name()) + level.Info(util_log.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name()) if err := os.Remove(path); err != nil { return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go index 0799716afad5..d8fc70d788d5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go @@ -15,6 +15,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) type deleteRequestHandlerMetrics struct { @@ -107,7 +108,7 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r } if err := dm.deleteStore.AddDeleteRequest(ctx, userID, model.Time(startTime), model.Time(endTime), match); err != nil { - level.Error(util.Logger).Log("msg", "error adding delete request to the store", "err", err) + level.Error(util_log.Logger).Log("msg", "error adding delete request to the store", "err", err) http.Error(w, err.Error(), 
http.StatusInternalServerError) return } @@ -127,13 +128,13 @@ func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWrite deleteRequests, err := dm.deleteStore.GetAllDeleteRequestsForUser(ctx, userID) if err != nil { - level.Error(util.Logger).Log("msg", "error getting delete requests from the store", "err", err) + level.Error(util_log.Logger).Log("msg", "error getting delete requests from the store", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } if err := json.NewEncoder(w).Encode(deleteRequests); err != nil { - level.Error(util.Logger).Log("msg", "error marshalling response", "err", err) + level.Error(util_log.Logger).Log("msg", "error marshalling response", "err", err) http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError) } } @@ -152,7 +153,7 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter deleteRequest, err := dm.deleteStore.GetDeleteRequest(ctx, userID, requestID) if err != nil { - level.Error(util.Logger).Log("msg", "error getting delete request from the store", "err", err) + level.Error(util_log.Logger).Log("msg", "error getting delete request from the store", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -173,7 +174,7 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter } if err := dm.deleteStore.RemoveDeleteRequest(ctx, userID, requestID, deleteRequest.CreatedAt, deleteRequest.StartTime, deleteRequest.EndTime); err != nil { - level.Error(util.Logger).Log("msg", "error cancelling the delete request", "err", err) + level.Error(util_log.Logger).Log("msg", "error cancelling the delete request", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go index 73348bf40ada..fdf2cc0914de 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go @@ -15,7 +15,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql/parser" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const tombstonesReloadDuration = 5 * time.Minute @@ -97,7 +97,7 @@ func (tl *TombstonesLoader) loop() { case <-tombstonesReloadTimer.C: err := tl.reloadTombstones() if err != nil { - level.Error(util.Logger).Log("msg", "error reloading tombstones", "err", err) + level.Error(util_log.Logger).Log("msg", "error reloading tombstones", "err", err) } case <-tl.quit: return @@ -285,7 +285,7 @@ func (tl *TombstonesLoader) getCacheGenNumbersPerTenants(tenantIDs []string) *ca if numbers.results != "" { results, err := strconv.Atoi(numbers.results) if err != nil { - level.Error(util.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err) + level.Error(util_log.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err) } else if maxResults < results { maxResults = results result.results = numbers.results @@ -296,7 +296,7 @@ func (tl *TombstonesLoader) getCacheGenNumbersPerTenants(tenantIDs []string) *ca if numbers.store != "" { store, err := strconv.Atoi(numbers.store) if err != nil { - level.Error(util.Logger).Log("msg", "error parsing storeCacheGenNumber", "user", tenantID, "err", err) + 
level.Error(util_log.Logger).Log("msg", "error parsing storeCacheGenNumber", "user", tenantID, "err", err) } else if maxStore < store { maxStore = store result.store = numbers.store @@ -326,7 +326,7 @@ func (tl *TombstonesLoader) getCacheGenNumbers(userID string) *cacheGenNumbers { genNumbers, err := tl.deleteStore.getCacheGenerationNumbers(context.Background(), userID) if err != nil { - level.Error(util.Logger).Log("msg", "error loading cache generation numbers", "err", err) + level.Error(util_log.Logger).Log("msg", "error loading cache generation numbers", "err", err) tl.metrics.cacheGenLoadFailures.Inc() return &cacheGenNumbers{} } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go index 441fc5f84cdd..6f6cc8f4d301 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go @@ -14,7 +14,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/cortexproject/cortex/pkg/querier/astmapper" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -882,7 +882,7 @@ func (v10Entries) FilterReadQueries(queries []IndexQuery, shard *astmapper.Shard s := strings.Split(query.HashValue, ":")[0] n, err := strconv.Atoi(s) if err != nil { - level.Error(util.Logger).Log( + level.Error(util_log.Logger).Log( "msg", "Unable to determine shard from IndexQuery", "HashValue", diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 6097b434b103..310404cd1335 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -23,7 +23,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/objectclient" "github.com/cortexproject/cortex/pkg/chunk/openstack" "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Supported storage engines @@ -105,7 +105,7 @@ func (cfg *Config) Validate() error { if err := cfg.CassandraStorageConfig.Validate(); err != nil { return errors.Wrap(err, "invalid Cassandra Storage config") } - if err := cfg.GCPStorageConfig.Validate(util.Logger); err != nil { + if err := cfg.GCPStorageConfig.Validate(util_log.Logger); err != nil { return errors.Wrap(err, "invalid GCP Storage Storage config") } if err := cfg.Swift.Validate(); err != nil { @@ -222,7 +222,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, regis } path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/") if len(path) > 0 { - level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) + level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } return aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer) case "gcp": @@ -256,7 +256,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, regis } path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/") if len(path) > 0 { - level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) + level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } return aws.NewDynamoDBChunkClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer) case "azure": @@ -308,7 
+308,7 @@ func NewTableClient(name string, cfg Config, registerer prometheus.Registerer) ( } path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/") if len(path) > 0 { - level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) + level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } return aws.NewDynamoDBTableClient(cfg.AWSStorageConfig.DynamoDBConfig, registerer) case "gcp", "gcp-columnkey", "bigtable", "bigtable-hashed": diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go index eda8a83f753f..c4f46830471e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go @@ -18,7 +18,7 @@ import ( "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/mtime" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -215,7 +215,7 @@ func (m *TableManager) loop(ctx context.Context) error { if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", instrument.NewHistogramCollector(m.metrics.syncTableDuration), instrument.ErrorCode, func(ctx context.Context) error { return m.SyncTables(ctx) }); err != nil { - level.Error(util.Logger).Log("msg", "error syncing tables", "err", err) + level.Error(util_log.Logger).Log("msg", "error syncing tables", "err", err) } // Sleep for a bit to spread the sync load across different times if the tablemanagers are all started at once. @@ -231,7 +231,7 @@ func (m *TableManager) loop(ctx context.Context) error { if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", instrument.NewHistogramCollector(m.metrics.syncTableDuration), instrument.ErrorCode, func(ctx context.Context) error { return m.SyncTables(ctx) }); err != nil { - level.Error(util.Logger).Log("msg", "error syncing tables", "err", err) + level.Error(util_log.Logger).Log("msg", "error syncing tables", "err", err) } case <-ctx.Done(): return nil @@ -254,7 +254,7 @@ func (m *TableManager) checkAndCreateExtraTables() error { for _, tableDesc := range extraTables.Tables { if _, ok := existingTablesMap[tableDesc.Name]; !ok { // creating table - level.Info(util.Logger).Log("msg", "creating extra table", + level.Info(util_log.Logger).Log("msg", "creating extra table", "tableName", tableDesc.Name, "provisionedRead", tableDesc.ProvisionedRead, "provisionedWrite", tableDesc.ProvisionedWrite, @@ -272,7 +272,7 @@ func (m *TableManager) checkAndCreateExtraTables() error { continue } - level.Info(util.Logger).Log("msg", "checking throughput of extra table", "table", tableDesc.Name) + level.Info(util_log.Logger).Log("msg", "checking throughput of extra table", "table", tableDesc.Name) // table already exists, lets check actual throughput for tables is same as what is in configurations, if not let us update it current, _, err := extraTables.TableClient.DescribeTable(context.Background(), tableDesc.Name) if err != nil { @@ -280,7 +280,7 @@ func (m *TableManager) checkAndCreateExtraTables() error { } if !current.Equals(tableDesc) { - level.Info(util.Logger).Log("msg", "updating throughput of extra table", + level.Info(util_log.Logger).Log("msg", "updating throughput of extra table", "table", tableDesc.Name, "tableName", tableDesc.Name, "provisionedRead", tableDesc.ProvisionedRead, @@ -305,7 +305,7 @@ 
func (m *TableManager) bucketRetentionIteration(ctx context.Context) error { err := m.bucketClient.DeleteChunksBefore(ctx, mtime.Now().Add(-m.cfg.RetentionPeriod)) if err != nil { - level.Error(util.Logger).Log("msg", "error enforcing filesystem retention", "err", err) + level.Error(util_log.Logger).Log("msg", "error enforcing filesystem retention", "err", err) } // don't return error, otherwise timer service would stop. @@ -321,7 +321,7 @@ func (m *TableManager) SyncTables(ctx context.Context) error { } expected := m.calculateExpectedTables() - level.Info(util.Logger).Log("msg", "synching tables", "expected_tables", len(expected)) + level.Info(util_log.Logger).Log("msg", "synching tables", "expected_tables", len(expected)) toCreate, toCheckThroughput, toDelete, err := m.partitionTables(ctx, expected) if err != nil { @@ -473,7 +473,7 @@ func (m *TableManager) createTables(ctx context.Context, descriptions []TableDes merr := tsdb_errors.NewMulti() for _, desc := range descriptions { - level.Info(util.Logger).Log("msg", "creating table", "table", desc.Name) + level.Info(util_log.Logger).Log("msg", "creating table", "table", desc.Name) err := m.client.CreateTable(ctx, desc) if err != nil { numFailures++ @@ -490,12 +490,12 @@ func (m *TableManager) deleteTables(ctx context.Context, descriptions []TableDes merr := tsdb_errors.NewMulti() for _, desc := range descriptions { - level.Info(util.Logger).Log("msg", "table has exceeded the retention period", "table", desc.Name) + level.Info(util_log.Logger).Log("msg", "table has exceeded the retention period", "table", desc.Name) if !m.cfg.RetentionDeletesEnabled { continue } - level.Info(util.Logger).Log("msg", "deleting table", "table", desc.Name) + level.Info(util_log.Logger).Log("msg", "deleting table", "table", desc.Name) err := m.client.DeleteTable(ctx, desc.Name) if err != nil { numFailures++ @@ -509,7 +509,7 @@ func (m *TableManager) deleteTables(ctx context.Context, descriptions []TableDes func (m *TableManager) updateTables(ctx context.Context, descriptions []TableDesc) error { for _, expected := range descriptions { - level.Debug(util.Logger).Log("msg", "checking provisioned throughput on table", "table", expected.Name) + level.Debug(util_log.Logger).Log("msg", "checking provisioned throughput on table", "table", expected.Name) current, isActive, err := m.client.DescribeTable(ctx, expected.Name) if err != nil { return err @@ -523,12 +523,12 @@ func (m *TableManager) updateTables(ctx context.Context, descriptions []TableDes } if !isActive { - level.Info(util.Logger).Log("msg", "skipping update on table, not yet ACTIVE", "table", expected.Name) + level.Info(util_log.Logger).Log("msg", "skipping update on table, not yet ACTIVE", "table", expected.Name) continue } if expected.Equals(current) { - level.Info(util.Logger).Log("msg", "provisioned throughput on table, skipping", "table", current.Name, "read", current.ProvisionedRead, "write", current.ProvisionedWrite) + level.Info(util_log.Logger).Log("msg", "provisioned throughput on table, skipping", "table", current.Name, "read", current.ProvisionedRead, "write", current.ProvisionedWrite) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go index 583b38e3cb08..78e2e9a6d684 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go @@ -34,8 +34,48 @@ import ( var ( errInvalidBlockRanges = "compactor block 
range periods should be divisible by the previous one, but %s is not divisible by %s" RingOp = ring.NewOp([]ring.IngesterState{ring.ACTIVE}, nil) + + DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion prometheus.Counter, garbageCollectedBlocks prometheus.Counter) compact.Grouper { + return compact.NewDefaultGrouper( + logger, + bkt, + false, // Do not accept malformed indexes + true, // Enable vertical compaction + reg, + blocksMarkedForDeletion, + garbageCollectedBlocks) + } + + DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, compact.Planner, error) { + compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) + if err != nil { + return nil, nil, err + } + + planner := compact.NewTSDBBasedPlanner(logger, cfg.BlockRanges.ToMilliseconds()) + return compactor, planner, nil + } ) +// BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks. +type BlocksGrouperFactory func( + ctx context.Context, + cfg Config, + bkt objstore.Bucket, + logger log.Logger, + reg prometheus.Registerer, + blocksMarkedForDeletion prometheus.Counter, + garbageCollectedBlocks prometheus.Counter, +) compact.Grouper + +// BlocksCompactorFactory builds and returns the compactor and planner to use to compact a tenant's blocks. +type BlocksCompactorFactory func( + ctx context.Context, + cfg Config, + logger log.Logger, + reg prometheus.Registerer, +) (compact.Compactor, compact.Planner, error) + // Config holds the Compactor config. type Config struct { BlockRanges cortex_tsdb.DurationList `yaml:"block_ranges"` @@ -66,6 +106,10 @@ type Config struct { // it in tests. retryMinBackoff time.Duration `yaml:"-"` retryMaxBackoff time.Duration `yaml:"-"` + + // Allow downstream projects to customise the blocks compactor. + BlocksGrouperFactory BlocksGrouperFactory `yaml:"-"` + BlocksCompactorFactory BlocksCompactorFactory `yaml:"-"` } // RegisterFlags registers the Compactor flags. @@ -124,9 +168,11 @@ type Compactor struct { // If empty, no users are disabled. If not empty, users in the map are disabled (not owned by this compactor). disabledUsers map[string]struct{} - // Function that creates bucket client, TSDB planner and compactor using the context. + // Functions that creates bucket client, grouper, planner and compactor using the context. // Useful for injecting mock objects from tests. - createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) + bucketClientFactory func(ctx context.Context) (objstore.Bucket, error) + blocksGrouperFactory BlocksGrouperFactory + blocksCompactorFactory BlocksCompactorFactory // Users scanner, used to discover users from the bucket. usersScanner *cortex_tsdb.UsersScanner @@ -135,8 +181,8 @@ type Compactor struct { blocksCleaner *BlocksCleaner // Underlying compactor and planner used to compact TSDB blocks. - tsdbCompactor tsdb.Compactor - tsdbPlanner compact.Planner + blocksCompactor compact.Compactor + blocksPlanner compact.Planner // Client used to run operations on the bucket storing blocks. bucketClient objstore.Bucket @@ -165,22 +211,21 @@ type Compactor struct { // NewCompactor makes a new Compactor. 
func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) { - createDependencies := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) { - bucketClient, err := bucket.NewClient(ctx, storageCfg.Bucket, "compactor", logger, registerer) - if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to create the bucket client") - } + bucketClientFactory := func(ctx context.Context) (objstore.Bucket, error) { + return bucket.NewClient(ctx, storageCfg.Bucket, "compactor", logger, registerer) + } - compactor, err := tsdb.NewLeveledCompactor(ctx, registerer, logger, compactorCfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) - if err != nil { - return nil, nil, nil, err - } + blocksGrouperFactory := compactorCfg.BlocksGrouperFactory + if blocksGrouperFactory == nil { + blocksGrouperFactory = DefaultBlocksGrouperFactory + } - planner := compact.NewTSDBBasedPlanner(logger, compactorCfg.BlockRanges.ToMilliseconds()) - return bucketClient, compactor, planner, nil + blocksCompactorFactory := compactorCfg.BlocksCompactorFactory + if blocksCompactorFactory == nil { + blocksCompactorFactory = DefaultBlocksCompactorFactory } - cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createDependencies) + cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory) if err != nil { return nil, errors.Wrap(err, "failed to create Cortex blocks compactor") } @@ -193,16 +238,20 @@ func newCompactor( storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer, - createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error), + bucketClientFactory func(ctx context.Context) (objstore.Bucket, error), + blocksGrouperFactory BlocksGrouperFactory, + blocksCompactorFactory BlocksCompactorFactory, ) (*Compactor, error) { c := &Compactor{ - compactorCfg: compactorCfg, - storageCfg: storageCfg, - parentLogger: logger, - logger: log.With(logger, "component", "compactor"), - registerer: registerer, - syncerMetrics: newSyncerMetrics(registerer), - createDependencies: createDependencies, + compactorCfg: compactorCfg, + storageCfg: storageCfg, + parentLogger: logger, + logger: log.With(logger, "component", "compactor"), + registerer: registerer, + syncerMetrics: newSyncerMetrics(registerer), + bucketClientFactory: bucketClientFactory, + blocksGrouperFactory: blocksGrouperFactory, + blocksCompactorFactory: blocksCompactorFactory, compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_compactor_runs_started_total", @@ -273,10 +322,16 @@ func newCompactor( func (c *Compactor) starting(ctx context.Context) error { var err error - // Create bucket client and compactor. - c.bucketClient, c.tsdbCompactor, c.tsdbPlanner, err = c.createDependencies(ctx) + // Create bucket client. + c.bucketClient, err = c.bucketClientFactory(ctx) if err != nil { - return errors.Wrap(err, "failed to initialize compactor objects") + return errors.Wrap(err, "failed to create bucket client") + } + + // Create blocks compactor dependencies. 
+ c.blocksCompactor, c.blocksPlanner, err = c.blocksCompactorFactory(ctx, c.compactorCfg, c.logger, c.registerer) + if err != nil { + return errors.Wrap(err, "failed to initialize compactor dependencies") } // Wrap the bucket client to write block deletion marks in the global location too. @@ -545,22 +600,12 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { return errors.Wrap(err, "failed to create syncer") } - grouper := compact.NewDefaultGrouper( - ulogger, - bucket, - false, // Do not accept malformed indexes - true, // Enable vertical compaction - reg, - c.blocksMarkedForDeletion, - c.garbageCollectedBlocks, - ) - compactor, err := compact.NewBucketCompactor( ulogger, syncer, - grouper, - c.tsdbPlanner, - c.tsdbCompactor, + c.blocksGrouperFactory(ctx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.garbageCollectedBlocks), + c.blocksPlanner, + c.blocksCompactor, path.Join(c.compactorCfg.DataDir, "compact"), bucket, c.compactorCfg.CompactionConcurrency, diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go index 2c0608ea056b..d73c7a689067 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -32,7 +32,7 @@ func writeMessage(w http.ResponseWriter, message string) { }{Message: message}) if err != nil { - level.Error(util.Logger).Log("msg", "unable to serve compactor ring page", "err", err) + level.Error(util_log.Logger).Log("msg", "unable to serve compactor ring page", "err", err) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go index a39d76ef8836..d10f675f5e00 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go @@ -9,8 +9,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // RingConfig masks the ring lifecycler config which contains @@ -40,7 +40,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go index f3ba2f491a71..43873c2f0df7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Copied from Thanos, pkg/compact/compact.go. 
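The compactor refactor above splits the old createDependencies hook into separately pluggable factories (BlocksGrouperFactory, BlocksCompactorFactory) exposed as yaml:"-" fields on compactor.Config, so they can only be set in code. A minimal sketch, assuming a hypothetical downstream package, of how those hooks could be wired up; only the exported Cortex and Thanos identifiers come from the patch, the rest is illustrative:

package compactorsketch

import (
	"context"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/thanos-io/thanos/pkg/compact"
	"github.com/thanos-io/thanos/pkg/compact/downsample"

	"github.com/cortexproject/cortex/pkg/compactor"
)

// newCompactorConfig keeps the default grouper but swaps in a custom
// compactor/planner factory. The factory body mirrors
// DefaultBlocksCompactorFactory; a real downstream project would return its
// own compact.Compactor / compact.Planner implementations here.
// In practice cfg would come from flags/YAML; the zero value is used only to
// keep the sketch short.
func newCompactorConfig() compactor.Config {
	cfg := compactor.Config{}
	cfg.BlocksGrouperFactory = compactor.DefaultBlocksGrouperFactory
	cfg.BlocksCompactorFactory = func(ctx context.Context, ccfg compactor.Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, compact.Planner, error) {
		comp, err := tsdb.NewLeveledCompactor(ctx, reg, logger, ccfg.BlockRanges.ToMilliseconds(), downsample.NewPool())
		if err != nil {
			return nil, nil, err
		}
		return comp, compact.NewTSDBBasedPlanner(logger, ccfg.BlockRanges.ToMilliseconds()), nil
	}
	return cfg
}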
@@ -95,13 +96,13 @@ func (m *syncerMetrics) gatherThanosSyncerMetrics(reg *prometheus.Registry) { mf, err := reg.Gather() if err != nil { - level.Warn(util.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) return } mfm, err := util.NewMetricFamilyMap(mf) if err != nil { - level.Warn(util.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) return } diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go index 9058ae722197..127292bc69de 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go @@ -24,7 +24,7 @@ import ( "github.com/cortexproject/cortex/pkg/configs/userconfig" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/log" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) var ( @@ -114,7 +114,7 @@ func (a *API) getConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := log.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) cfg, err := a.db.GetConfig(r.Context(), userID) if err == sql.ErrNoRows { @@ -152,7 +152,7 @@ func (a *API) setConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := log.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) var cfg userconfig.Config switch parseConfigFormat(r.Header.Get("Content-Type"), FormatJSON) { @@ -202,7 +202,7 @@ func (a *API) setConfig(w http.ResponseWriter, r *http.Request) { } func (a *API) validateAlertmanagerConfig(w http.ResponseWriter, r *http.Request) { - logger := log.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) cfg, err := ioutil.ReadAll(r.Body) if err != nil { level.Error(logger).Log("msg", "error reading request body", "err", err) @@ -266,7 +266,7 @@ type ConfigsView struct { func (a *API) getConfigs(w http.ResponseWriter, r *http.Request) { var cfgs map[string]userconfig.View var cfgErr error - logger := log.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) rawSince := r.FormValue("since") if rawSince == "" { cfgs, cfgErr = a.db.GetAllConfigs(r.Context()) @@ -302,7 +302,7 @@ func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := log.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) if err := a.db.DeactivateConfig(r.Context(), userID); err != nil { if err == sql.ErrNoRows { @@ -324,7 +324,7 @@ func (a *API) restoreConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := log.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) if err := a.db.RestoreConfig(r.Context(), userID); err != nil { if err == sql.ErrNoRows { diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go index cc57ad82f6de..5517d1cb5b55 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go @@ -18,8 +18,8 @@ import ( "github.com/weaveworks/common/instrument" "github.com/cortexproject/cortex/pkg/configs/userconfig" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" tls_cfg "github.com/cortexproject/cortex/pkg/util/tls" ) @@ -155,7 +155,7 @@ func doRequest(endpoint string, timeout time.Duration, tlsConfig *tls.Config, si var config ConfigsResponse if err := json.NewDecoder(resp.Body).Decode(&config); err != nil { - level.Error(util.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err) + level.Error(util_log.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err) return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go index 2bd3bbd3e7ce..c66ff90fd61b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go @@ -9,6 +9,7 @@ import ( "time" "github.com/cortexproject/cortex/pkg/configs/userconfig" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/Masterminds/squirrel" "github.com/go-kit/kit/log/level" @@ -18,8 +19,6 @@ import ( "github.com/lib/pq" _ "github.com/lib/pq" // Import the postgres sql driver "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/util" ) const ( @@ -60,7 +59,7 @@ func dbWait(db *sql.DB) error { if err == nil { return nil } - level.Warn(util.Logger).Log("msg", "db connection not established, retrying...", "err", err) + level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err) time.Sleep(time.Second << uint(tries)) } return errors.Wrapf(err, "db connection not established after %s", dbTimeout) @@ -88,13 +87,13 @@ func New(uri, migrationsDir string) (DB, error) { return DB{}, errors.Wrap(err, "database migrations initialization failed") } - level.Info(util.Logger).Log("msg", "running database migrations...") + level.Info(util_log.Logger).Log("msg", "running database migrations...") if err := m.Up(); err != nil { if err != migrate.ErrNoChange { return DB{}, errors.Wrap(err, "database migrations failed") } - level.Debug(util.Logger).Log("msg", "no change in schema, error (ignored)", "err", err) + level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err) } } @@ -354,7 +353,7 @@ func (d DB) Transaction(f func(DB) error) error { if err != nil { // Rollback error is ignored as we already have one in progress if err2 := tx.Rollback(); err2 != nil { - level.Warn(util.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2) + level.Warn(util_log.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2) } return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go index 7a2cc3aac613..5ae94c01d300 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go @@ -5,10 +5,9 @@ 
import ( "fmt" "github.com/cortexproject/cortex/pkg/configs/userconfig" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" - - "github.com/cortexproject/cortex/pkg/util" ) // traced adds log trace lines on each db call @@ -17,7 +16,7 @@ type traced struct { } func (t traced) trace(name string, args ...interface{}) { - level.Debug(util.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args)) } func (t traced) GetConfig(ctx context.Context, userID string) (cfg userconfig.View, err error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go index 0b36801012ce..e55542fd443b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go @@ -17,7 +17,7 @@ import ( "github.com/prometheus/prometheus/rules" legacy_promql "github.com/cortexproject/cortex/pkg/configs/legacy_promql" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // An ID is the ID of a single users's Cortex configuration. When a @@ -370,7 +370,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { labels.FromMap(rl.Annotations), nil, true, - log.With(util.Logger, "alert", rl.Alert.Value), + log.With(util_log.Logger, "alert", rl.Alert.Value), )) continue } @@ -418,7 +418,7 @@ func (c RulesConfig) parseV1() (map[string][]rules.Rule, error) { rule = rules.NewAlertingRule( r.Name, expr, r.Duration, r.Labels, r.Annotations, nil, true, - log.With(util.Logger, "alert", r.Name), + log.With(util_log.Logger, "alert", r.Name), ) case *legacy_promql.RecordStmt: diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go index d92d18dbc5c3..aa06abf0019b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go @@ -487,10 +487,11 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro queryrange.PrometheusResponseExtractor{}, t.Cfg.Schema, promql.EngineOpts{ - Logger: util_log.Logger, - Reg: prometheus.DefaultRegisterer, - MaxSamples: t.Cfg.Querier.MaxSamples, - Timeout: t.Cfg.Querier.Timeout, + Logger: util_log.Logger, + Reg: prometheus.DefaultRegisterer, + MaxSamples: t.Cfg.Querier.MaxSamples, + Timeout: t.Cfg.Querier.Timeout, + EnableAtModifier: t.Cfg.Querier.AtModifierEnabled, NoStepSubqueryIntervalFn: func(int64) int64 { return t.Cfg.Querier.DefaultEvaluationInterval.Milliseconds() }, diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go index 7a565fbfb017..850aedcd32b2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go @@ -7,7 +7,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/weaveworks/common/server" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -47,7 +47,7 @@ func NewServerService(serv *server.Server, servicesToWaitFor func() []services.S // if not closed yet, wait until server stops. 
<-serverDone - level.Info(util.Logger).Log("msg", "server stopped") + level.Info(util_log.Logger).Log("msg", "server stopped") return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go index 1b4679ec5800..45a3c3c3f346 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -170,10 +170,6 @@ type Config struct { // for testing and for extending the ingester by adding calls to the client IngesterClientFactory ring_client.PoolFactory `yaml:"-"` - // when true the distributor does not validate the label name, Cortex doesn't directly use - // this (and should never use it) but this feature is used by other projects built on top of it - SkipLabelNameValidation bool `yaml:"-"` - // This config is dynamically injected because defined in the querier config. ShuffleShardingLookbackPeriod time.Duration `yaml:"-"` } @@ -496,8 +492,7 @@ func (d *Distributor) Push(ctx context.Context, req *ingester_client.WriteReques return nil, err } - skipLabelNameValidation := d.cfg.SkipLabelNameValidation || req.GetSkipLabelNameValidation() - validatedSeries, err := d.validateSeries(ts, userID, skipLabelNameValidation) + validatedSeries, err := d.validateSeries(ts, userID, req.GetSkipLabelNameValidation()) // Errors in validation are considered non-fatal, as one series in a request may contain // invalid data but all the remaining series could be perfectly valid. @@ -568,7 +563,7 @@ func (d *Distributor) Push(ctx context.Context, req *ingester_client.WriteReques op = ring.Write } - err = ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.IngesterDesc, indexes []int) error { + err = ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error { timeseries := make([]ingester_client.PreallocTimeseries, 0, len(indexes)) var metadata []*ingester_client.MetricMetadata @@ -621,7 +616,7 @@ func sortLabelsIfNeeded(labels []ingester_client.LabelAdapter) { }) } -func (d *Distributor) send(ctx context.Context, ingester ring.IngesterDesc, timeseries []ingester_client.PreallocTimeseries, metadata []*ingester_client.MetricMetadata, source ingester_client.WriteRequest_SourceEnum) error { +func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []ingester_client.PreallocTimeseries, metadata []*ingester_client.MetricMetadata, source ingester_client.WriteRequest_SourceEnum) error { h, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { return err @@ -653,7 +648,7 @@ func (d *Distributor) send(ctx context.Context, ingester ring.IngesterDesc, time // ForReplicationSet runs f, in parallel, for all ingesters in the input replication set. 
func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, f func(context.Context, ingester_client.IngesterClient) (interface{}, error)) ([]interface{}, error) { - return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) { + return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go index af77a5d98bce..78296341f78a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go @@ -9,8 +9,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // RingConfig masks the ring lifecycler config which contains @@ -36,7 +36,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go index d7b526b497f2..6aef3e7fd317 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go @@ -21,7 +21,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ring/kv/codec" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -155,7 +155,7 @@ func newClusterTracker(cfg HATrackerConfig, limits haTrackerLimits, reg promethe } t := &haTracker{ - logger: util.Logger, + logger: util_log.Logger, cfg: cfg, updateTimeoutJitter: jitter, limits: limits, @@ -259,7 +259,7 @@ func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica s // The callback within checkKVStore will return a replicasNotMatchError if the sample is being deduped, // otherwise there may have been an actual error CAS'ing that we should log. 
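Related note on the distributor hunk above: the package-level SkipLabelNameValidation config escape hatch is gone, and Push now honours only the per-request flag via req.GetSkipLabelNameValidation(). A minimal sketch of what a downstream writer would set instead; the WriteRequest field name is inferred from that generated getter and is not itself shown in this patch:

package pushsketch

import (
	"context"

	"github.com/cortexproject/cortex/pkg/distributor"
	ingester_client "github.com/cortexproject/cortex/pkg/ingester/client"
)

// pushWithoutLabelNameValidation opts out of label-name validation per
// request, replacing the removed distributor.Config.SkipLabelNameValidation.
// The SkipLabelNameValidation field on WriteRequest is assumed from the
// getter used in Distributor.Push above.
func pushWithoutLabelNameValidation(ctx context.Context, d *distributor.Distributor, series []ingester_client.PreallocTimeseries) error {
	_, err := d.Push(ctx, &ingester_client.WriteRequest{
		Timeseries:              series,
		SkipLabelNameValidation: true,
	})
	return err
}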
if !errors.Is(err, replicasNotMatchError{}) { - level.Error(util.Logger).Log("msg", "rejecting sample", "err", err) + level.Error(util_log.Logger).Log("msg", "rejecting sample", "err", err) } } return err diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go index 501ce4a42236..b8a214ab98ce 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go @@ -129,7 +129,7 @@ func (d *Distributor) GetIngestersForMetadata(ctx context.Context) (ring.Replica func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (model.Matrix, error) { // Fetch samples from multiple ingesters in parallel, using the replicationSet // to deal with consistency. - results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err @@ -174,7 +174,7 @@ func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.Re // queryIngesterStream queries the ingesters using the new streaming API. func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (*ingester_client.QueryStreamResponse, error) { // Fetch samples from multiple ingesters - results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go index 6157599a8758..4cf4495a783c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go +++ b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go @@ -11,6 +11,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -87,7 +88,7 @@ func (f *Flusher) running(ctx context.Context) error { // Sleeping to give a chance to Prometheus // to collect the metrics. 
- level.Info(util.Logger).Log("msg", "sleeping to give chance for collection of metrics", "duration", postFlushSleepTime.String()) + level.Info(util_log.Logger).Log("msg", "sleeping to give chance for collection of metrics", "duration", postFlushSleepTime.String()) time.Sleep(postFlushSleepTime) if err := services.StopAndAwaitTerminated(ctx, ing); err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go index 0e2fa8270451..139acb801932 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go @@ -1578,7 +1578,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) { uploaded, err := userDB.shipper.Sync(ctx) if err != nil { - level.Warn(util.Logger).Log("msg", "shipper failed to synchronize TSDB blocks with the storage", "user", userID, "uploaded", uploaded, "err", err) + level.Warn(log.Logger).Log("msg", "shipper failed to synchronize TSDB blocks with the storage", "user", userID, "uploaded", uploaded, "err", err) } else { level.Debug(log.Logger).Log("msg", "shipper successfully synchronized TSDB blocks with storage", "user", userID, "uploaded", uploaded) } @@ -1590,7 +1590,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) { // the cached list of blocks in such case, so we're not handling it. if uploaded > 0 { if err := userDB.updateCachedShippedBlocks(); err != nil { - level.Error(util.Logger).Log("msg", "failed to update cached shipped blocks after shipper synchronisation", "user", userID, "err", err) + level.Error(log.Logger).Log("msg", "failed to update cached shipped blocks after shipper synchronisation", "user", userID, "err", err) } } @@ -1811,7 +1811,7 @@ func (i *Ingester) v2FlushHandler(w http.ResponseWriter, _ *http.Request) { } if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { - level.Info(util.Logger).Log("msg", "flushing TSDB blocks: triggering shipping") + level.Info(log.Logger).Log("msg", "flushing TSDB blocks: triggering shipping") select { case i.TSDBState.shipTrigger <- ch: diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go index 18977e7176e7..bf3a2a1bfae6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go @@ -10,7 +10,7 @@ import ( "github.com/prometheus/common/model" "go.uber.org/atomic" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const maxMappedFP = 1 << 20 // About 1M fingerprints reserved for mapping. @@ -105,7 +105,7 @@ func (m *fpMapper) maybeAddMapping( // A new mapping has to be created. 
mappedFP = m.nextMappedFP() mappedFPs[ms] = mappedFP - level.Debug(util.Logger).Log( + level.Debug(util_log.Logger).Log( "msg", "fingerprint collision detected, mapping to new fingerprint", "old_fp", fp, "new_fp", mappedFP, @@ -119,7 +119,7 @@ func (m *fpMapper) maybeAddMapping( m.mtx.Lock() m.mappings[fp] = mappedFPs m.mtx.Unlock() - level.Debug(util.Logger).Log( + level.Debug(util_log.Logger).Log( "msg", "fingerprint collision detected, mapping to new fingerprint", "old_fp", fp, "new_fp", mappedFP, diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go index b9b87bda7e0d..305aacb0c48d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go @@ -17,6 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) var ( @@ -53,7 +54,7 @@ func (i *Ingester) fillUserStatesFromStream(userStates *userStates, stream clien // round this loop. if fromIngesterID == "" { fromIngesterID = wireSeries.FromIngesterId - level.Info(util.Logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID) + level.Info(util_log.Logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID) // Before transfer, make sure 'from' ingester is in correct state to call ClaimTokensFor later err := i.checkFromIngesterIsInLeavingState(stream.Context(), fromIngesterID) @@ -90,13 +91,13 @@ func (i *Ingester) fillUserStatesFromStream(userStates *userStates, stream clien } if seriesReceived == 0 { - level.Error(util.Logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID) + level.Error(util_log.Logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID) retErr = fmt.Errorf("TransferChunks: no series") return } if fromIngesterID == "" { - level.Error(util.Logger).Log("msg", "received TransferChunks request with no ID from ingester") + level.Error(util_log.Logger).Log("msg", "received TransferChunks request with no ID from ingester") retErr = fmt.Errorf("no ingester id") return } @@ -139,10 +140,10 @@ func (i *Ingester) TransferChunks(stream client.Ingester_TransferChunksServer) e // Close the stream last, as this is what tells the "from" ingester that // it's OK to shut down. 
if err := stream.SendAndClose(&client.TransferChunksResponse{}); err != nil { - level.Error(util.Logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err) + level.Error(util_log.Logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err) return err } - level.Info(util.Logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived) + level.Info(util_log.Logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived) return nil } @@ -186,12 +187,12 @@ func (i *Ingester) transfer(ctx context.Context, xfer func() error) error { return } - level.Error(util.Logger).Log("msg", "TransferChunks failed, not in ACTIVE state.", "state", state) + level.Error(util_log.Logger).Log("msg", "TransferChunks failed, not in ACTIVE state.", "state", state) // Enter PENDING state (only valid from JOINING) if i.lifecycler.GetState() == ring.JOINING { if err := i.lifecycler.ChangeState(ctx, ring.PENDING); err != nil { - level.Error(util.Logger).Log("msg", "error rolling back failed TransferChunks", "err", err) + level.Error(util_log.Logger).Log("msg", "error rolling back failed TransferChunks", "err", err) os.Exit(1) } } @@ -267,7 +268,7 @@ func fromWireChunks(wireChunks []client.Chunk) ([]*desc, error) { func (i *Ingester) TransferOut(ctx context.Context) error { // The blocks storage doesn't support blocks transferring. if i.cfg.BlocksStorageEnabled { - level.Info(util.Logger).Log("msg", "transfer between a LEAVING ingester and a PENDING one is not supported for the blocks storage") + level.Info(util_log.Logger).Log("msg", "transfer between a LEAVING ingester and a PENDING one is not supported for the blocks storage") return ring.ErrTransferDisabled } @@ -287,23 +288,23 @@ func (i *Ingester) TransferOut(ctx context.Context) error { for backoff.Ongoing() { err = i.transferOut(ctx) if err == nil { - level.Info(util.Logger).Log("msg", "transfer successfully completed") + level.Info(util_log.Logger).Log("msg", "transfer successfully completed") return nil } - level.Warn(util.Logger).Log("msg", "transfer attempt failed", "err", err, "attempt", backoff.NumRetries()+1, "max_retries", i.cfg.MaxTransferRetries) + level.Warn(util_log.Logger).Log("msg", "transfer attempt failed", "err", err, "attempt", backoff.NumRetries()+1, "max_retries", i.cfg.MaxTransferRetries) backoff.Wait() } - level.Error(util.Logger).Log("msg", "all transfer attempts failed", "err", err) + level.Error(util_log.Logger).Log("msg", "all transfer attempts failed", "err", err) return backoff.Err() } func (i *Ingester) transferOut(ctx context.Context) error { userStatesCopy := i.userStates.cp() if len(userStatesCopy) == 0 { - level.Info(util.Logger).Log("msg", "nothing to transfer") + level.Info(util_log.Logger).Log("msg", "nothing to transfer") return nil } @@ -312,7 +313,7 @@ func (i *Ingester) transferOut(ctx context.Context) error { return fmt.Errorf("cannot find ingester to transfer chunks to: %w", err) } - level.Info(util.Logger).Log("msg", "sending chunks", "to_ingester", targetIngester.Addr) + level.Info(util_log.Logger).Log("msg", "sending chunks", "to_ingester", targetIngester.Addr) c, err := i.cfg.ingesterClientFactory(targetIngester.Addr, i.clientConfig) if err != nil { return err @@ -367,12 +368,12 @@ func (i *Ingester) transferOut(ctx context.Context) error { } i.flushQueuesDone.Wait() - level.Info(util.Logger).Log("msg", "successfully 
sent chunks", "to_ingester", targetIngester.Addr) + level.Info(util_log.Logger).Log("msg", "successfully sent chunks", "to_ingester", targetIngester.Addr) return nil } // findTargetIngester finds an ingester in PENDING state. -func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.IngesterDesc, error) { +func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.InstanceDesc, error) { ringDesc, err := i.lifecycler.KVStore.Get(ctx, i.lifecycler.RingKey) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go index 8ec6b6abd9f5..15d93d689473 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go @@ -26,7 +26,7 @@ import ( "github.com/prometheus/prometheus/tsdb/wal" "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // WALConfig is config for the Write Ahead Log. @@ -109,7 +109,7 @@ func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, register if registerer != nil { walRegistry = prometheus.WrapRegistererWith(prometheus.Labels{"kind": "wal"}, registerer) } - tsdbWAL, err := wal.NewSize(util.Logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, false) + tsdbWAL, err := wal.NewSize(util_log.Logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, false) if err != nil { return nil, err } @@ -219,19 +219,19 @@ func (w *walWrapper) run() { select { case <-ticker.C: start := time.Now() - level.Info(util.Logger).Log("msg", "starting checkpoint") + level.Info(util_log.Logger).Log("msg", "starting checkpoint") if err := w.performCheckpoint(false); err != nil { - level.Error(util.Logger).Log("msg", "error checkpointing series", "err", err) + level.Error(util_log.Logger).Log("msg", "error checkpointing series", "err", err) continue } elapsed := time.Since(start) - level.Info(util.Logger).Log("msg", "checkpoint done", "time", elapsed.String()) + level.Info(util_log.Logger).Log("msg", "checkpoint done", "time", elapsed.String()) w.checkpointDuration.Observe(elapsed.Seconds()) case <-w.quit: if w.cfg.checkpointDuringShutdown { - level.Info(util.Logger).Log("msg", "creating checkpoint before shutdown") + level.Info(util_log.Logger).Log("msg", "creating checkpoint before shutdown") if err := w.performCheckpoint(true); err != nil { - level.Error(util.Logger).Log("msg", "error checkpointing series during shutdown", "err", err) + level.Error(util_log.Logger).Log("msg", "error checkpointing series during shutdown", "err", err) } } return @@ -292,7 +292,7 @@ func (w *walWrapper) performCheckpoint(immediate bool) (err error) { // Checkpoint is named after the last WAL segment present so that when replaying the WAL // we can start from that particular WAL segment. checkpointDir := filepath.Join(w.wal.Dir(), fmt.Sprintf(checkpointPrefix+"%06d", lastSegment)) - level.Info(util.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir) + level.Info(util_log.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir) checkpointDirTemp := checkpointDir + ".tmp" if err := os.MkdirAll(checkpointDirTemp, 0777); err != nil { @@ -389,14 +389,14 @@ func (w *walWrapper) performCheckpoint(immediate bool) (err error) { if err := w.wal.Truncate(lastCh); err != nil { // It is fine to have old WAL segments hanging around if deletion failed. // We can try again next time. 
- level.Error(util.Logger).Log("msg", "error deleting old WAL segments", "err", err) + level.Error(util_log.Logger).Log("msg", "error deleting old WAL segments", "err", err) } if lastCh >= 0 { if err := w.deleteCheckpoints(lastCh); err != nil { // It is fine to have old checkpoints hanging around if deletion failed. // We can try again next time. - level.Error(util.Logger).Log("msg", "error deleting old checkpoint", "err", err) + level.Error(util_log.Logger).Log("msg", "error deleting old checkpoint", "err", err) } } @@ -520,17 +520,17 @@ func recoverFromWAL(ingester *Ingester) error { params.seriesCache[i] = make(map[string]map[uint64]*memorySeries) } - level.Info(util.Logger).Log("msg", "recovering from checkpoint") + level.Info(util_log.Logger).Log("msg", "recovering from checkpoint") start := time.Now() userStates, idx, err := processCheckpointWithRepair(params) if err != nil { return err } elapsed := time.Since(start) - level.Info(util.Logger).Log("msg", "recovered from checkpoint", "time", elapsed.String()) + level.Info(util_log.Logger).Log("msg", "recovered from checkpoint", "time", elapsed.String()) if segExists, err := segmentsExist(params.walDir); err == nil && !segExists { - level.Info(util.Logger).Log("msg", "no segments found, skipping recover from segments") + level.Info(util_log.Logger).Log("msg", "no segments found, skipping recover from segments") ingester.userStatesMtx.Lock() ingester.userStates = userStates ingester.userStatesMtx.Unlock() @@ -539,13 +539,13 @@ func recoverFromWAL(ingester *Ingester) error { return err } - level.Info(util.Logger).Log("msg", "recovering from WAL", "dir", params.walDir, "start_segment", idx) + level.Info(util_log.Logger).Log("msg", "recovering from WAL", "dir", params.walDir, "start_segment", idx) start = time.Now() if err := processWALWithRepair(idx, userStates, params); err != nil { return err } elapsed = time.Since(start) - level.Info(util.Logger).Log("msg", "recovered from WAL", "time", elapsed.String()) + level.Info(util_log.Logger).Log("msg", "recovered from WAL", "time", elapsed.String()) ingester.userStatesMtx.Lock() ingester.userStates = userStates @@ -563,11 +563,11 @@ func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int return nil, -1, err } if idx < 0 { - level.Info(util.Logger).Log("msg", "no checkpoint found") + level.Info(util_log.Logger).Log("msg", "no checkpoint found") return userStates, -1, nil } - level.Info(util.Logger).Log("msg", fmt.Sprintf("recovering from %s", lastCheckpointDir)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("recovering from %s", lastCheckpointDir)) err = processCheckpoint(lastCheckpointDir, userStates, params) if err == nil { @@ -577,7 +577,7 @@ func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int // We don't call repair on checkpoint as losing even a single record is like losing the entire data of a series. // We try recovering from the older checkpoint instead. params.ingester.metrics.walCorruptionsTotal.Inc() - level.Error(util.Logger).Log("msg", "checkpoint recovery failed, deleting this checkpoint and trying to recover from old checkpoint", "err", err) + level.Error(util_log.Logger).Log("msg", "checkpoint recovery failed, deleting this checkpoint and trying to recover from old checkpoint", "err", err) // Deleting this checkpoint to try the previous checkpoint. 
if err := os.RemoveAll(lastCheckpointDir); err != nil { @@ -599,7 +599,7 @@ func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int return userStates, -1, nil } - level.Info(util.Logger).Log("msg", fmt.Sprintf("attempting recovery from %s", lastCheckpointDir)) + level.Info(util_log.Logger).Log("msg", fmt.Sprintf("attempting recovery from %s", lastCheckpointDir)) if err := processCheckpoint(lastCheckpointDir, userStates, params); err != nil { // We won't attempt the repair again even if its the old checkpoint. params.ingester.metrics.walCorruptionsTotal.Inc() @@ -782,18 +782,18 @@ func processWALWithRepair(startSegment int, userStates *userStates, params walRe } params.ingester.metrics.walCorruptionsTotal.Inc() - level.Error(util.Logger).Log("msg", "error in replaying from WAL", "err", corruptErr) + level.Error(util_log.Logger).Log("msg", "error in replaying from WAL", "err", corruptErr) // Attempt repair. - level.Info(util.Logger).Log("msg", "attempting repair of the WAL") - w, err := wal.New(util.Logger, nil, params.walDir, true) + level.Info(util_log.Logger).Log("msg", "attempting repair of the WAL") + w, err := wal.New(util_log.Logger, nil, params.walDir, true) if err != nil { return err } err = w.Repair(corruptErr) if err != nil { - level.Error(util.Logger).Log("msg", "error in repairing WAL", "err", err) + level.Error(util_log.Logger).Log("msg", "error in repairing WAL", "err", err) } return tsdb_errors.NewMulti(err, w.Close()).Err() @@ -970,7 +970,7 @@ func processWALSamples(userStates *userStates, stateCache map[string]*userState, // If the series was not created in recovering checkpoint or // from the labels of any records previous to this, there // is no way to get the labels for this fingerprint. - level.Warn(util.Logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Ref).String()) + level.Warn(util_log.Logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Ref).String()) continue } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go b/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go index 989c6bdb92f0..d9f309e1112c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/prometheus/promql/parser" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) var summableAggregates = map[parser.ItemType]struct{}{ @@ -90,7 +90,7 @@ func CanParallelize(node parser.Node) bool { return true default: - level.Error(util.Logger).Log("err", fmt.Sprintf("CanParallel: unhandled node type %T", node)) + level.Error(util_log.Logger).Log("err", fmt.Sprintf("CanParallel: unhandled node type %T", node)) return false } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go index 0e408dcbf10b..8c94ad4b64f2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go @@ -17,6 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/client" 
"github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -52,7 +53,7 @@ func (s *blocksStoreBalancedSet) starting(ctx context.Context) error { func (s *blocksStoreBalancedSet) resolve(ctx context.Context) error { if err := s.dnsProvider.Resolve(ctx, s.serviceAddresses); err != nil { - level.Error(util.Logger).Log("msg", "failed to resolve store-gateway addresses", "err", err, "addresses", s.serviceAddresses) + level.Error(util_log.Logger).Log("msg", "failed to resolve store-gateway addresses", "err", err, "addresses", s.serviceAddresses) } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go index ce8d12ee21e3..4599df2a5aec 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go @@ -28,6 +28,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -42,6 +43,7 @@ type Config struct { MaxSamples int `yaml:"max_samples"` QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"` QueryStoreForLabels bool `yaml:"query_store_for_labels_enabled"` + AtModifierEnabled bool `yaml:"at_modifier_enabled"` // QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters. QueryStoreAfter time.Duration `yaml:"query_store_after"` @@ -88,6 +90,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels-enabled", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.") + f.BoolVar(&cfg.AtModifierEnabled, "querier.at-modifier-enabled", false, "Enable the @ modifier in PromQL.") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. 
When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") @@ -164,12 +167,13 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor }) engine := promql.NewEngine(promql.EngineOpts{ - Logger: util.Logger, + Logger: util_log.Logger, Reg: reg, ActiveQueryTracker: createActiveQueryTracker(cfg), MaxSamples: cfg.MaxSamples, Timeout: cfg.Timeout, LookbackDelta: cfg.LookbackDelta, + EnableAtModifier: cfg.AtModifierEnabled, NoStepSubqueryIntervalFn: func(int64) int64 { return cfg.DefaultEvaluationInterval.Milliseconds() }, @@ -195,7 +199,7 @@ func createActiveQueryTracker(cfg Config) *promql.ActiveQueryTracker { dir := cfg.ActiveQueryTrackerDir if dir != "" { - return promql.NewActiveQueryTracker(dir, cfg.MaxConcurrent, util.Logger) + return promql.NewActiveQueryTracker(dir, cfg.MaxConcurrent, util_log.Logger) } return nil diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go index 0b38a181785a..752b3ed463fd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "sort" + "strings" "time" "github.com/go-kit/kit/log" @@ -17,6 +18,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/promql/parser" "github.com/uber/jaeger-client-go" "github.com/weaveworks/common/httpgrpc" @@ -209,9 +211,9 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { cached, ok := s.get(ctx, key) if ok { - response, extents, err = s.handleHit(ctx, r, cached) + response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime) } else { - response, extents, err = s.handleMiss(ctx, r) + response, extents, err = s.handleMiss(ctx, r, maxCacheTime) } if err == nil && len(extents) > 0 { @@ -226,7 +228,7 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { } // shouldCacheResponse says whether the response should be cached or not. -func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool { +func (s resultsCache) shouldCacheResponse(ctx context.Context, req Request, r Response, maxCacheTime int64) bool { headerValues := getHeaderValuesWithName(r, cacheControlHeader) for _, v := range headerValues { if v == noStoreValue { @@ -235,6 +237,10 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool } } + if !s.isAtModifierCachable(req, maxCacheTime) { + return false + } + if s.cacheGenNumberLoader == nil { return true } @@ -257,6 +263,55 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool return true } +var errAtModifierAfterEnd = errors.New("at modifier after end") + +// isAtModifierCachable returns true if the @ modifier result +// is safe to cache. +func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool { + // There are 2 cases when @ modifier is not safe to cache: + // 1. When @ modifier points to time beyond the maxCacheTime. + // 2. If the @ modifier time is > the query range end while being + // below maxCacheTime. 
In such cases if any tenant is intentionally + // playing with old data, we could cache empty result if we look + // beyond query end. + query := r.GetQuery() + if !strings.Contains(query, "@") { + return true + } + expr, err := parser.ParseExpr(query) + if err != nil { + // We are being pessimistic in such cases. + level.Warn(s.logger).Log("msg", "failed to parse query, considering @ modifier as not cachable", "query", query, "err", err) + return false + } + + end := r.GetEnd() + atModCachable := true + parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error { + switch e := n.(type) { + case *parser.VectorSelector: + if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) { + atModCachable = false + return errAtModifierAfterEnd + } + case *parser.MatrixSelector: + ts := e.VectorSelector.(*parser.VectorSelector).Timestamp + if ts != nil && (*ts > end || *ts > maxCacheTime) { + atModCachable = false + return errAtModifierAfterEnd + } + case *parser.SubqueryExpr: + if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) { + atModCachable = false + return errAtModifierAfterEnd + } + } + return nil + }) + + return atModCachable +} + func getHeaderValuesWithName(r Response, headerName string) (headerValues []string) { for _, hv := range r.GetHeaders() { if hv.GetName() != headerName { @@ -269,13 +324,13 @@ func getHeaderValuesWithName(r Response, headerName string) (headerValues []stri return } -func (s resultsCache) handleMiss(ctx context.Context, r Request) (Response, []Extent, error) { +func (s resultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) { response, err := s.next.Do(ctx, r) if err != nil { return nil, nil, err } - if !s.shouldCacheResponse(ctx, response) { + if !s.shouldCacheResponse(ctx, r, response, maxCacheTime) { return response, []Extent{}, nil } @@ -290,7 +345,7 @@ func (s resultsCache) handleMiss(ctx context.Context, r Request) (Response, []Ex return response, extents, nil } -func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent) (Response, []Extent, error) { +func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) { var ( reqResps []RequestResponse err error @@ -315,7 +370,7 @@ func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent for _, reqResp := range reqResps { responses = append(responses, reqResp.Response) - if !s.shouldCacheResponse(ctx, reqResp.Response) { + if !s.shouldCacheResponse(ctx, r, reqResp.Response, maxCacheTime) { continue } extent, err := toExtent(ctx, reqResp.Request, s.extractor.ResponseWithoutHeaders(reqResp.Response)) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go index 102865ede1aa..efe10149386b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go @@ -13,7 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -21,7 +21,7 @@ type BasicLifecyclerDelegate interface { // OnRingInstanceRegister is called while the lifecycler is registering the // instance within the ring and should return the state and set of 
tokens to // use for the instance itself. - OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) + OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) // OnRingInstanceTokens is called once the instance tokens are set and are // stable within the ring (honoring the observe period, if set). @@ -34,7 +34,7 @@ type BasicLifecyclerDelegate interface { // OnRingInstanceHeartbeat is called while the instance is updating its heartbeat // in the ring. - OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) + OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) } type BasicLifecyclerConfig struct { @@ -77,7 +77,7 @@ type BasicLifecycler struct { // The current instance state. currState sync.RWMutex - currInstanceDesc *IngesterDesc + currInstanceDesc *InstanceDesc } // NewBasicLifecycler makes a new BasicLifecycler. @@ -194,7 +194,7 @@ func (l *BasicLifecycler) running(ctx context.Context) error { f() case <-ctx.Done(): - level.Info(util.Logger).Log("msg", "ring lifecycler is shutting down", "ring", l.ringName) + level.Info(util_log.Logger).Log("msg", "ring lifecycler is shutting down", "ring", l.ringName) return nil } } @@ -239,7 +239,7 @@ heartbeatLoop: // registerInstance registers the instance in the ring. The initial state and set of tokens // depends on the OnRingInstanceRegister() delegate function. func (l *BasicLifecycler) registerInstance(ctx context.Context) error { - var instanceDesc IngesterDesc + var instanceDesc InstanceDesc err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) @@ -327,7 +327,7 @@ func (l *BasicLifecycler) waitStableTokens(ctx context.Context, period time.Dura func (l *BasicLifecycler) verifyTokens(ctx context.Context) bool { result := false - err := l.updateInstance(ctx, func(r *Desc, i *IngesterDesc) bool { + err := l.updateInstance(ctx, func(r *Desc, i *InstanceDesc) bool { // At this point, we should have the same tokens as we have registered before. actualTokens, takenTokens := r.TokensFor(l.cfg.ID) @@ -385,8 +385,8 @@ func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error { return nil } -func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *IngesterDesc) bool) error { - var instanceDesc IngesterDesc +func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *InstanceDesc) bool) error { + var instanceDesc InstanceDesc err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) @@ -431,7 +431,7 @@ func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, // heartbeat updates the instance timestamp within the ring. This function is guaranteed // to be called within the lifecycler main goroutine. func (l *BasicLifecycler) heartbeat(ctx context.Context) { - err := l.updateInstance(ctx, func(r *Desc, i *IngesterDesc) bool { + err := l.updateInstance(ctx, func(r *Desc, i *InstanceDesc) bool { l.delegate.OnRingInstanceHeartbeat(l, r, i) i.Timestamp = time.Now().Unix() return true @@ -448,7 +448,7 @@ func (l *BasicLifecycler) heartbeat(ctx context.Context) { // changeState of the instance within the ring. 
This function is guaranteed // to be called within the lifecycler main goroutine. func (l *BasicLifecycler) changeState(ctx context.Context, state IngesterState) error { - err := l.updateInstance(ctx, func(_ *Desc, i *IngesterDesc) bool { + err := l.updateInstance(ctx, func(_ *Desc, i *InstanceDesc) bool { // No-op if the state hasn't changed. if i.State == state { return false diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go index 8006d350767e..7126198e1504 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go @@ -21,7 +21,7 @@ func NewLeaveOnStoppingDelegate(next BasicLifecyclerDelegate, logger log.Logger) } } -func (d *LeaveOnStoppingDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) { +func (d *LeaveOnStoppingDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) { return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) } @@ -37,7 +37,7 @@ func (d *LeaveOnStoppingDelegate) OnRingInstanceStopping(lifecycler *BasicLifecy d.next.OnRingInstanceStopping(lifecycler) } -func (d *LeaveOnStoppingDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) { +func (d *LeaveOnStoppingDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) { d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc) } @@ -57,7 +57,7 @@ func NewTokensPersistencyDelegate(path string, state IngesterState, next BasicLi } } -func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) { +func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) { // Skip if no path has been configured. if d.tokensPath == "" { level.Info(d.logger).Log("msg", "not loading tokens from file, tokens file path is empty") @@ -82,7 +82,7 @@ func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLife // Signal the next delegate that the tokens have been loaded, miming the // case the instance exist in the ring (which is OK because the lifecycler // will correctly reconcile this case too). 
- return d.next.OnRingInstanceRegister(lifecycler, ringDesc, true, lifecycler.GetInstanceID(), IngesterDesc{ + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, true, lifecycler.GetInstanceID(), InstanceDesc{ Addr: lifecycler.GetInstanceAddr(), Timestamp: time.Now().Unix(), RegisteredTimestamp: lifecycler.GetRegisteredAt().Unix(), @@ -106,7 +106,7 @@ func (d *TokensPersistencyDelegate) OnRingInstanceStopping(lifecycler *BasicLife d.next.OnRingInstanceStopping(lifecycler) } -func (d *TokensPersistencyDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) { +func (d *TokensPersistencyDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) { d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc) } @@ -126,7 +126,7 @@ func NewAutoForgetDelegate(forgetPeriod time.Duration, next BasicLifecyclerDeleg } } -func (d *AutoForgetDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) { +func (d *AutoForgetDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) { return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) } @@ -138,7 +138,7 @@ func (d *AutoForgetDelegate) OnRingInstanceStopping(lifecycler *BasicLifecycler) d.next.OnRingInstanceStopping(lifecycler) } -func (d *AutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) { +func (d *AutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) { for id, instance := range ringDesc.Ingesters { lastHeartbeat := time.Unix(instance.GetTimestamp(), 0) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go index 3990bdd4dd30..c24dc200deea 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go @@ -16,7 +16,7 @@ type batchTracker struct { } type instance struct { - desc IngesterDesc + desc InstanceDesc itemTrackers []*itemTracker indexes []int } @@ -38,7 +38,7 @@ type itemTracker struct { // to send to that instance. // // Not implemented as a method on Ring so we can test separately. 
-func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(IngesterDesc, []int) error, cleanup func()) error { +func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error { if r.InstancesCount() <= 0 { return fmt.Errorf("DoBatch: InstancesCount <= 0") } @@ -47,7 +47,7 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb instances := make(map[string]instance, r.InstancesCount()) var ( - bufDescs [GetBufferSize]IngesterDesc + bufDescs [GetBufferSize]InstanceDesc bufHosts [GetBufferSize]string bufZones [GetBufferSize]string ) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go index 41bc8728abb3..39da1db688a9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go @@ -14,6 +14,7 @@ import ( "google.golang.org/grpc/health/grpc_health_v1" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -158,7 +159,7 @@ func (p *Pool) removeStaleClients() { serviceAddrs, err := p.discovery() if err != nil { - level.Error(util.Logger).Log("msg", "error removing stale clients", "err", err) + level.Error(util_log.Logger).Log("msg", "error removing stale clients", "err", err) return } @@ -166,7 +167,7 @@ func (p *Pool) removeStaleClients() { if util.StringsContain(serviceAddrs, addr) { continue } - level.Info(util.Logger).Log("msg", "removing stale client", "addr", addr) + level.Info(util_log.Logger).Log("msg", "removing stale client", "addr", addr) p.RemoveClientFor(addr) } } @@ -179,7 +180,7 @@ func (p *Pool) cleanUnhealthy() { if ok { err := healthCheck(client, p.cfg.HealthCheckTimeout) if err != nil { - level.Warn(util.Logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err) + level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err) p.RemoveClientFor(addr) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go index e0ab7ce64b93..1706edb2a175 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go @@ -1,12 +1,17 @@ package client import ( + "errors" + "github.com/cortexproject/cortex/pkg/ring" ) func NewRingServiceDiscovery(r ring.ReadRing) PoolServiceDiscovery { return func() ([]string, error) { replicationSet, err := r.GetAllHealthy(ring.Reporting) + if errors.Is(err, ring.ErrEmptyRing) { + return nil, nil + } if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go index c1e12863e957..1c39a473ced7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go @@ -17,6 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -143,14 +144,14 @@ func (c *Client) cas(ctx 
context.Context, key string, f func(in interface{}) (ou options := &consul.QueryOptions{} kvp, _, err := c.kv.Get(key, options.WithContext(ctx)) if err != nil { - level.Error(util.Logger).Log("msg", "error getting key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error getting key", "key", key, "err", err) continue } var intermediate interface{} if kvp != nil { out, err := c.codec.Decode(kvp.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) continue } // If key doesn't exist, index will be 0. @@ -174,7 +175,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou bytes, err := c.codec.Encode(intermediate) if err != nil { - level.Error(util.Logger).Log("msg", "error serialising value", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error serialising value", "key", key, "err", err) continue } ok, _, err := c.kv.CAS(&consul.KVPair{ @@ -183,11 +184,11 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou ModifyIndex: index, }, writeOptions.WithContext(ctx)) if err != nil { - level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error CASing", "key", key, "err", err) continue } if !ok { - level.Debug(util.Logger).Log("msg", "error CASing, trying again", "key", key, "index", index) + level.Debug(util_log.Logger).Log("msg", "error CASing, trying again", "key", key, "index", index) continue } return nil @@ -213,7 +214,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b if errors.Is(err, context.Canceled) { break } - level.Error(util.Logger).Log("msg", "error while rate-limiting", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error while rate-limiting", "key", key, "err", err) backoff.Wait() continue } @@ -230,7 +231,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b // Don't backoff if value is not found (kvp == nil). In that case, Consul still returns index value, // and next call to Get will block as expected. We handle missing value below. 
if err != nil { - level.Error(util.Logger).Log("msg", "error getting path", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error getting path", "key", key, "err", err) backoff.Wait() continue } @@ -243,13 +244,13 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b } if kvp == nil { - level.Info(util.Logger).Log("msg", "value is nil", "key", key, "index", index) + level.Info(util_log.Logger).Log("msg", "value is nil", "key", key, "index", index) continue } out, err := c.codec.Decode(kvp.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) continue } if !f(out) { @@ -273,7 +274,7 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, if errors.Is(err, context.Canceled) { break } - level.Error(util.Logger).Log("msg", "error while rate-limiting", "prefix", prefix, "err", err) + level.Error(util_log.Logger).Log("msg", "error while rate-limiting", "prefix", prefix, "err", err) backoff.Wait() continue } @@ -289,7 +290,7 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, // kvps being nil here is not an error -- quite the opposite. Consul returns index, // which makes next query blocking, so there is no need to detect this and act on it. if err != nil { - level.Error(util.Logger).Log("msg", "error getting path", "prefix", prefix, "err", err) + level.Error(util_log.Logger).Log("msg", "error getting path", "prefix", prefix, "err", err) backoff.Wait() continue } @@ -309,7 +310,7 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, out, err := c.codec.Decode(kvp.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding list of values for prefix:key", "prefix", prefix, "key", kvp.Key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding list of values for prefix:key", "prefix", prefix, "key", kvp.Key, "err", err) continue } if !f(kvp.Key, out) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go index 708bea76205f..5d1e4557395b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go @@ -10,7 +10,7 @@ import ( consul "github.com/hashicorp/consul/api" "github.com/cortexproject/cortex/pkg/ring/kv/codec" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) type mockKV struct { @@ -78,12 +78,12 @@ func (m *mockKV) Put(p *consul.KVPair, q *consul.WriteOptions) (*consul.WriteMet m.cond.Broadcast() - level.Debug(util.Logger).Log("msg", "Put", "key", p.Key, "value", fmt.Sprintf("%.40q", p.Value), "modify_index", m.current) + level.Debug(util_log.Logger).Log("msg", "Put", "key", p.Key, "value", fmt.Sprintf("%.40q", p.Value), "modify_index", m.current) return nil, nil } func (m *mockKV) CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.WriteMeta, error) { - level.Debug(util.Logger).Log("msg", "CAS", "key", p.Key, "modify_index", p.ModifyIndex, "value", fmt.Sprintf("%.40q", p.Value)) + level.Debug(util_log.Logger).Log("msg", "CAS", "key", p.Key, "modify_index", p.ModifyIndex, "value", fmt.Sprintf("%.40q", p.Value)) m.mtx.Lock() defer m.mtx.Unlock() @@ -110,14 +110,14 @@ func (m *mockKV) CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.Wr } 
func (m *mockKV) Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consul.QueryMeta, error) { - level.Debug(util.Logger).Log("msg", "Get", "key", key, "wait_index", q.WaitIndex) + level.Debug(util_log.Logger).Log("msg", "Get", "key", key, "wait_index", q.WaitIndex) m.mtx.Lock() defer m.mtx.Unlock() value := m.kvps[key] if value == nil && q.WaitIndex == 0 { - level.Debug(util.Logger).Log("msg", "Get - not found", "key", key) + level.Debug(util_log.Logger).Log("msg", "Get - not found", "key", key) return nil, &consul.QueryMeta{LastIndex: m.current}, nil } @@ -146,17 +146,17 @@ func (m *mockKV) Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consu } } if time.Now().After(deadline) { - level.Debug(util.Logger).Log("msg", "Get - deadline exceeded", "key", key) + level.Debug(util_log.Logger).Log("msg", "Get - deadline exceeded", "key", key) return nil, &consul.QueryMeta{LastIndex: q.WaitIndex}, nil } } if value == nil { - level.Debug(util.Logger).Log("msg", "Get - not found", "key", key) + level.Debug(util_log.Logger).Log("msg", "Get - not found", "key", key) return nil, &consul.QueryMeta{LastIndex: m.current}, nil } - level.Debug(util.Logger).Log("msg", "Get", "key", key, "modify_index", value.ModifyIndex, "value", fmt.Sprintf("%.40q", value.Value)) + level.Debug(util_log.Logger).Log("msg", "Get", "key", key, "modify_index", value.ModifyIndex, "value", fmt.Sprintf("%.40q", value.Value)) return copyKVPair(value), &consul.QueryMeta{LastIndex: value.ModifyIndex}, nil } @@ -203,7 +203,7 @@ func (m *mockKV) ResetIndex() { m.current = 0 m.cond.Broadcast() - level.Debug(util.Logger).Log("msg", "Reset") + level.Debug(util_log.Logger).Log("msg", "Reset") } func (m *mockKV) ResetIndexForKey(key string) { @@ -215,7 +215,7 @@ func (m *mockKV) ResetIndexForKey(key string) { } m.cond.Broadcast() - level.Debug(util.Logger).Log("msg", "ResetIndexForKey", "key", key) + level.Debug(util_log.Logger).Log("msg", "ResetIndexForKey", "key", key) } // mockedMaxWaitTime returns the minimum duration between the input duration diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go index fa1e61732689..222fb4ee3198 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go @@ -15,6 +15,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" cortex_tls "github.com/cortexproject/cortex/pkg/util/tls" ) @@ -106,7 +107,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou for i := 0; i < c.cfg.MaxRetries; i++ { resp, err := c.cli.Get(ctx, key) if err != nil { - level.Error(util.Logger).Log("msg", "error getting key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error getting key", "key", key, "err", err) lastErr = err continue } @@ -115,7 +116,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou if len(resp.Kvs) > 0 { intermediate, err = c.codec.Decode(resp.Kvs[0].Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) lastErr = err continue } @@ -139,7 +140,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou buf, err := 
c.codec.Encode(intermediate) if err != nil { - level.Error(util.Logger).Log("msg", "error serialising value", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error serialising value", "key", key, "err", err) lastErr = err continue } @@ -149,13 +150,13 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou Then(clientv3.OpPut(key, string(buf))). Commit() if err != nil { - level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error CASing", "key", key, "err", err) lastErr = err continue } // result is not Succeeded if the the comparison was false, meaning if the modify indexes did not match. if !result.Succeeded { - level.Debug(util.Logger).Log("msg", "failed to CAS, revision and version did not match in etcd", "key", key, "revision", revision) + level.Debug(util_log.Logger).Log("msg", "failed to CAS, revision and version did not match in etcd", "key", key, "revision", revision) continue } @@ -183,7 +184,7 @@ outer: for backoff.Ongoing() { for resp := range c.cli.Watch(watchCtx, key) { if err := resp.Err(); err != nil { - level.Error(util.Logger).Log("msg", "watch error", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "watch error", "key", key, "err", err) continue outer } @@ -192,7 +193,7 @@ outer: for _, event := range resp.Events { out, err := c.codec.Decode(event.Kv.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) continue } @@ -219,7 +220,7 @@ outer: for backoff.Ongoing() { for resp := range c.cli.Watch(watchCtx, key, clientv3.WithPrefix()) { if err := resp.Err(); err != nil { - level.Error(util.Logger).Log("msg", "watch error", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "watch error", "key", key, "err", err) continue outer } @@ -228,7 +229,7 @@ outer: for _, event := range resp.Events { out, err := c.codec.Decode(event.Kv.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go index c4687721233c..f739b67241cd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/hashicorp/memberlist" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // ringBroadcast implements memberlist.Broadcast interface, which is used by memberlist.TransmitLimitedQueue. 
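For context on the pattern this vendor bump applies across every package above: the shared `Logger` global has moved from `github.com/cortexproject/cortex/pkg/util` to `github.com/cortexproject/cortex/pkg/util/log`, imported under the `util_log` alias, while the remaining helpers (for example `util.StringsContain`) stay in `pkg/util`. The sketch below is illustrative only (the `example` package and `logStale` function are made up); it mirrors the `pool.go` hunk above to show that call sites keep the same go-kit `level.X(logger).Log(...)` shape and only swap the package that owns `Logger`.

```go
package example

import (
	"github.com/go-kit/kit/log/level"

	"github.com/cortexproject/cortex/pkg/util"
	util_log "github.com/cortexproject/cortex/pkg/util/log"
)

// logStale mirrors Pool.removeStaleClients from the diff above: StringsContain
// still comes from pkg/util, while the process-wide logger now comes from the
// pkg/util/log package (aliased util_log).
func logStale(serviceAddrs []string, addr string) {
	if util.StringsContain(serviceAddrs, addr) {
		return
	}
	level.Info(util_log.Logger).Log("msg", "removing stale client", "addr", addr)
}
```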
@@ -45,7 +45,7 @@ func (r ringBroadcast) Invalidates(old memberlist.Broadcast) bool { // otherwise, we may be invalidating some older messages, which however covered different // ingesters if r.version >= oldb.version { - level.Debug(util.Logger).Log("msg", "Invalidating forwarded broadcast", "key", r.key, "version", r.version, "oldVersion", oldb.version, "content", fmt.Sprintf("%v", r.content), "oldContent", fmt.Sprintf("%v", oldb.content)) + level.Debug(util_log.Logger).Log("msg", "Invalidating forwarded broadcast", "key", r.key, "version", r.version, "oldVersion", oldb.version, "content", fmt.Sprintf("%v", r.content), "oldContent", fmt.Sprintf("%v", oldb.content)) return true } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go index 26495ff243bd..056cc7817187 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go @@ -23,6 +23,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -192,7 +193,7 @@ func generateRandomSuffix() string { suffix := make([]byte, 4) _, err := rand.Read(suffix) if err != nil { - level.Error(util.Logger).Log("msg", "failed to generate random suffix", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to generate random suffix", "err", err) return "error" } return fmt.Sprintf("%2x", suffix) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go index f37d6fdc6656..6b35a1e69041 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go @@ -11,7 +11,7 @@ import ( // loggerAdapter wraps a Logger and allows it to be passed to the stdlib // logger's SetOutput. It understand and parses output produced by memberlist -// library (esp. level). Timestamp from memberlist can be ignored (eg. util.Logger +// library (esp. level). Timestamp from memberlist can be ignored (eg. 
pkg/util/log.Logger // is set up to auto-include timestamp with every message already) type loggerAdapter struct { log.Logger diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go index e0fcf7c9964d..2cbcb6b15a98 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go @@ -8,7 +8,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -191,7 +191,7 @@ func (m *KV) createAndRegisterMetrics() { } if err != nil { - level.Error(util.Logger).Log("msg", "failed to register prometheus metrics for memberlist", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to register prometheus metrics for memberlist", "err", err) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go index c899b634326e..ac7ae011df71 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go @@ -5,7 +5,7 @@ import ( "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // The mockClient does not anything. @@ -13,7 +13,7 @@ import ( type mockClient struct{} func buildMockClient() (Client, error) { - level.Warn(util.Logger).Log("msg", "created mockClient for testing only") + level.Warn(util_log.Logger).Log("msg", "created mockClient for testing only") return mockClient{}, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go index 3817725fe3a3..3bfb1bcdbba7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go @@ -11,7 +11,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" ) @@ -118,7 +118,7 @@ func NewMultiClient(cfg MultiConfig, clients []kvclient) *MultiClient { mirrorTimeout: cfg.MirrorTimeout, mirroringEnabled: atomic.NewBool(cfg.MirrorEnabled), - logger: log.With(util.Logger, "component", "multikv"), + logger: log.With(util_log.Logger, "component", "multikv"), } ctx, cancelFn := context.WithCancel(context.Background()) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index bd3d4bf3958b..4e82d645209b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -528,7 +528,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error { ringDesc = in.(*Desc) } - ingesterDesc, ok := ringDesc.Ingesters[i.ID] + instanceDesc, ok := ringDesc.Ingesters[i.ID] if !ok { // The instance doesn't exist in the ring, so it's safe to set the registered timestamp // as of now. 
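The `IngesterDesc` to `InstanceDesc` rename that runs through the ring package is mechanical: the descriptor's fields (`Addr`, `Timestamp`, `State`, `Tokens`, `Zone`, `RegisteredTimestamp`) are unchanged, so callers only update the type they name. A minimal sketch under that assumption; the `addrsInState` helper is hypothetical and exists only to show the shape of code that walks `Desc.Ingesters` after the rename.

```go
package example

import "github.com/cortexproject/cortex/pkg/ring"

// addrsInState is a hypothetical helper: Desc.Ingesters is now a
// map[string]ring.InstanceDesc, but the fields read here (State, Addr)
// are the same ones the old IngesterDesc carried.
func addrsInState(d *ring.Desc, state ring.IngesterState) []string {
	var addrs []string
	for _, inst := range d.Ingesters {
		if inst.State == state {
			addrs = append(addrs, inst.Addr)
		}
	}
	return addrs
}
```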
@@ -554,27 +554,27 @@ func (i *Lifecycler) initRing(ctx context.Context) error { // The instance already exists in the ring, so we can't change the registered timestamp (even if it's zero) // but we need to update the local state accordingly. - i.setRegisteredAt(ingesterDesc.GetRegisteredAt()) + i.setRegisteredAt(instanceDesc.GetRegisteredAt()) // If the ingester is in the JOINING state this means it crashed due to // a failed token transfer or some other reason during startup. We want // to set it back to PENDING in order to start the lifecycle from the // beginning. - if ingesterDesc.State == JOINING { + if instanceDesc.State == JOINING { level.Warn(log.Logger).Log("msg", "instance found in ring as JOINING, setting to PENDING", "ring", i.RingName) - ingesterDesc.State = PENDING + instanceDesc.State = PENDING return ringDesc, true, nil } // If the ingester failed to clean it's ring entry up in can leave it's state in LEAVING. // Move it into ACTIVE to ensure the ingester joins the ring. - if ingesterDesc.State == LEAVING && len(ingesterDesc.Tokens) == i.cfg.NumTokens { - ingesterDesc.State = ACTIVE + if instanceDesc.State == LEAVING && len(instanceDesc.Tokens) == i.cfg.NumTokens { + instanceDesc.State = ACTIVE } // We exist in the ring, so assume the ring is right and copy out tokens & state out of there. - i.setState(ingesterDesc.State) + i.setState(instanceDesc.State) tokens, _ := ringDesc.TokensFor(i.ID) i.setTokens(tokens) @@ -705,18 +705,18 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error { ringDesc = in.(*Desc) } - ingesterDesc, ok := ringDesc.Ingesters[i.ID] + instanceDesc, ok := ringDesc.Ingesters[i.ID] if !ok { // consul must have restarted level.Info(log.Logger).Log("msg", "found empty ring, inserting tokens", "ring", i.RingName) ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt()) } else { - ingesterDesc.Timestamp = time.Now().Unix() - ingesterDesc.State = i.GetState() - ingesterDesc.Addr = i.Addr - ingesterDesc.Zone = i.Zone - ingesterDesc.RegisteredTimestamp = i.getRegisteredAt().Unix() - ringDesc.Ingesters[i.ID] = ingesterDesc + instanceDesc.Timestamp = time.Now().Unix() + instanceDesc.State = i.GetState() + instanceDesc.Addr = i.Addr + instanceDesc.Zone = i.Zone + instanceDesc.RegisteredTimestamp = i.getRegisteredAt().Unix() + ringDesc.Ingesters[i.ID] = instanceDesc } return ringDesc, true, nil diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go index cd41039e57d6..4187275184f4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go @@ -12,8 +12,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" ) -// ByAddr is a sortable list of IngesterDesc. -type ByAddr []IngesterDesc +// ByAddr is a sortable list of InstanceDesc. +type ByAddr []InstanceDesc func (ts ByAddr) Len() int { return len(ts) } func (ts ByAddr) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } @@ -32,15 +32,15 @@ func GetCodec() codec.Codec { // NewDesc returns an empty ring.Desc func NewDesc() *Desc { return &Desc{ - Ingesters: map[string]IngesterDesc{}, + Ingesters: map[string]InstanceDesc{}, } } // AddIngester adds the given ingester to the ring. Ingester will only use supplied tokens, // any other tokens are removed. 
-func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState, registeredAt time.Time) IngesterDesc { +func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState, registeredAt time.Time) InstanceDesc { if d.Ingesters == nil { - d.Ingesters = map[string]IngesterDesc{} + d.Ingesters = map[string]InstanceDesc{} } registeredTimestamp := int64(0) @@ -48,7 +48,7 @@ func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state Ingeste registeredTimestamp = registeredAt.Unix() } - ingester := IngesterDesc{ + ingester := InstanceDesc{ Addr: addr, Timestamp: time.Now().Unix(), RegisteredTimestamp: registeredTimestamp, @@ -87,8 +87,8 @@ func (d *Desc) ClaimTokens(from, to string) Tokens { } // FindIngestersByState returns the list of ingesters in the given state -func (d *Desc) FindIngestersByState(state IngesterState) []IngesterDesc { - var result []IngesterDesc +func (d *Desc) FindIngestersByState(state IngesterState) []InstanceDesc { + var result []InstanceDesc for _, ing := range d.Ingesters { if ing.State == state { result = append(result, ing) @@ -125,7 +125,7 @@ func (d *Desc) TokensFor(id string) (myTokens, allTokens Tokens) { // GetRegisteredAt returns the timestamp when the instance has been registered to the ring // or a zero value if unknown. -func (i *IngesterDesc) GetRegisteredAt() time.Time { +func (i *InstanceDesc) GetRegisteredAt() time.Time { if i == nil || i.RegisteredTimestamp == 0 { return time.Time{} } @@ -133,7 +133,7 @@ func (i *IngesterDesc) GetRegisteredAt() time.Time { return time.Unix(i.RegisteredTimestamp, 0) } -func (i *IngesterDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, now time.Time) bool { +func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, now time.Time) bool { healthy := op.IsInstanceInStateHealthy(i.State) return healthy && now.Unix()-i.Timestamp <= heartbeatTimeout.Milliseconds()/1000 @@ -245,8 +245,8 @@ func (d *Desc) MergeContent() []string { // buildNormalizedIngestersMap will do the following: // - sorts tokens and removes duplicates (only within single ingester) // - it doesn't modify input ring -func buildNormalizedIngestersMap(inputRing *Desc) map[string]IngesterDesc { - out := map[string]IngesterDesc{} +func buildNormalizedIngestersMap(inputRing *Desc) map[string]InstanceDesc { + out := map[string]InstanceDesc{} // Make sure LEFT ingesters have no tokens for n, ing := range inputRing.Ingesters { @@ -284,7 +284,7 @@ func buildNormalizedIngestersMap(inputRing *Desc) map[string]IngesterDesc { return out } -func conflictingTokensExist(normalizedIngesters map[string]IngesterDesc) bool { +func conflictingTokensExist(normalizedIngesters map[string]InstanceDesc) bool { count := 0 for _, ing := range normalizedIngesters { count += len(ing.Tokens) @@ -309,7 +309,7 @@ func conflictingTokensExist(normalizedIngesters map[string]IngesterDesc) bool { // 2) otherwise node names are compared, and node with "lower" name wins the token // // Modifies ingesters map with updated tokens. 
-func resolveConflicts(normalizedIngesters map[string]IngesterDesc) { +func resolveConflicts(normalizedIngesters map[string]InstanceDesc) { size := 0 for _, ing := range normalizedIngesters { size += len(ing.Tokens) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go index adc619e85cbf..391773dff150 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go @@ -9,7 +9,7 @@ import ( // ReplicationSet describes the ingesters to talk to for a given key, and how // many errors to tolerate. type ReplicationSet struct { - Ingesters []IngesterDesc + Ingesters []InstanceDesc // Maximum number of tolerated failing instances. Max errors and max unavailable zones are // mutually exclusive. @@ -22,11 +22,11 @@ type ReplicationSet struct { // Do function f in parallel for all replicas in the set, erroring is we exceed // MaxErrors and returning early otherwise. -func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(context.Context, *IngesterDesc) (interface{}, error)) ([]interface{}, error) { +func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(context.Context, *InstanceDesc) (interface{}, error)) ([]interface{}, error) { type instanceResult struct { res interface{} err error - instance *IngesterDesc + instance *InstanceDesc } // Initialise the result tracker, which is use to keep track of successes and failures. @@ -46,7 +46,7 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont // Spawn a goroutine for each instance. for i := range r.Ingesters { - go func(i int, ing *IngesterDesc) { + go func(i int, ing *InstanceDesc) { // Wait to send extra requests. Works only when zone-awareness is disabled. if delay > 0 && r.MaxUnavailableZones == 0 && i >= len(r.Ingesters)-r.MaxErrors { after := time.NewTimer(delay) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go index 09f12e3cebbf..fcdf5441dd24 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go @@ -3,7 +3,7 @@ package ring type replicationSetResultTracker interface { // Signals an instance has done the execution, either successful (no error) // or failed (with error). - done(instance *IngesterDesc, err error) + done(instance *InstanceDesc, err error) // Returns true if the minimum number of successful results have been received. 
succeeded() bool @@ -19,7 +19,7 @@ type defaultResultTracker struct { maxErrors int } -func newDefaultResultTracker(instances []IngesterDesc, maxErrors int) *defaultResultTracker { +func newDefaultResultTracker(instances []InstanceDesc, maxErrors int) *defaultResultTracker { return &defaultResultTracker{ minSucceeded: len(instances) - maxErrors, numSucceeded: 0, @@ -28,7 +28,7 @@ func newDefaultResultTracker(instances []IngesterDesc, maxErrors int) *defaultRe } } -func (t *defaultResultTracker) done(_ *IngesterDesc, err error) { +func (t *defaultResultTracker) done(_ *InstanceDesc, err error) { if err == nil { t.numSucceeded++ } else { @@ -53,7 +53,7 @@ type zoneAwareResultTracker struct { maxUnavailableZones int } -func newZoneAwareResultTracker(instances []IngesterDesc, maxUnavailableZones int) *zoneAwareResultTracker { +func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int) *zoneAwareResultTracker { t := &zoneAwareResultTracker{ waitingByZone: make(map[string]int), failuresByZone: make(map[string]int), @@ -68,7 +68,7 @@ func newZoneAwareResultTracker(instances []IngesterDesc, maxUnavailableZones int return t } -func (t *zoneAwareResultTracker) done(instance *IngesterDesc, err error) { +func (t *zoneAwareResultTracker) done(instance *InstanceDesc, err error) { t.waitingByZone[instance.Zone]-- if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go index f28a54e61827..e572cb77a441 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go @@ -11,7 +11,7 @@ type ReplicationStrategy interface { // Filter out unhealthy instances and checks if there're enough instances // for an operation to succeed. Returns an error if there are not enough // instances. - Filter(instances []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) (healthy []IngesterDesc, maxFailures int, err error) + Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) (healthy []InstanceDesc, maxFailures int, err error) } type defaultReplicationStrategy struct{} @@ -26,7 +26,7 @@ func NewDefaultReplicationStrategy() ReplicationStrategy { // - Filters out unhealthy instances so the one doesn't even try to write to them. // - Checks there are enough instances for an operation to succeed. // The instances argument may be overwritten. -func (s *defaultReplicationStrategy) Filter(instances []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]IngesterDesc, int, error) { +func (s *defaultReplicationStrategy) Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]InstanceDesc, int, error) { // We need a response from a quorum of instances, which is n/2 + 1. In the // case of a node joining/leaving, the actual replica set might be bigger // than the replication factor, so use the bigger or the two. 
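The comment above spells out the quorum rule that `defaultReplicationStrategy.Filter` enforces: a quorum is floor(n/2)+1, where n is the larger of the configured replication factor and the actual replica set size (which can temporarily exceed the RF while an instance joins or leaves). The function below is only a sketch of that arithmetic under those stated assumptions, not the vendored implementation; for example, with a replication factor of 3 and 4 live replicas during a hand-over it yields minSuccess=3 and maxFailures=1.

```go
package example

// quorum sketches the rule described in the Filter comment: require
// floor(n/2)+1 successes, where n is the larger of the replication factor
// and the current replica set size; everything beyond that may fail.
func quorum(replicationFactor, replicaSetSize int) (minSuccess, maxFailures int) {
	n := replicationFactor
	if replicaSetSize > n {
		n = replicaSetSize
	}
	minSuccess = n/2 + 1
	maxFailures = replicaSetSize - minSuccess
	return minSuccess, maxFailures
}
```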
@@ -71,7 +71,7 @@ func NewIgnoreUnhealthyInstancesReplicationStrategy() ReplicationStrategy { return &ignoreUnhealthyInstancesReplicationStrategy{} } -func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []IngesterDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool) (healthy []IngesterDesc, maxFailures int, err error) { +func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []InstanceDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool) (healthy []InstanceDesc, maxFailures int, err error) { now := time.Now() // Filter out unhealthy instances. for i := 0; i < len(instances); { @@ -90,7 +90,7 @@ func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []Ingeste return instances, len(instances) - 1, nil } -func (r *Ring) IsHealthy(instance *IngesterDesc, op Operation, now time.Time) bool { +func (r *Ring) IsHealthy(instance *InstanceDesc, op Operation, now time.Time) bool { return instance.IsHealthy(op, r.cfg.HeartbeatTimeout, now) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go index ad24dc31a02e..60e3b6e772ee 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go @@ -49,7 +49,7 @@ type ReadRing interface { // Get returns n (or more) instances which form the replicas for the given key. // bufDescs, bufHosts and bufZones are slices to be overwritten for the return value // to avoid memory allocation; can be nil, or created with ring.MakeBuffersForGet(). - Get(key uint32, op Operation, bufDescs []IngesterDesc, bufHosts, bufZones []string) (ReplicationSet, error) + Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, bufZones []string) (ReplicationSet, error) // GetAllHealthy returns all healthy instances in the ring, for the given operation. // This function doesn't check if the quorum is honored, so doesn't fail if the number @@ -304,7 +304,7 @@ func (r *Ring) loop(ctx context.Context) error { } // Get returns n (or more) instances which form the replicas for the given key. -func (r *Ring) Get(key uint32, op Operation, bufDescs []IngesterDesc, bufHosts, bufZones []string) (ReplicationSet, error) { +func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, bufZones []string) (ReplicationSet, error) { r.mtx.RLock() defer r.mtx.RUnlock() if r.ringDesc == nil || len(r.ringTokens) == 0 { @@ -380,7 +380,7 @@ func (r *Ring) GetAllHealthy(op Operation) (ReplicationSet, error) { } now := time.Now() - instances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) + instances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) for _, instance := range r.ringDesc.Ingesters { if r.IsHealthy(&instance, op, now) { instances = append(instances, instance) @@ -403,7 +403,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro } // Build the initial replication set, excluding unhealthy instances. - healthyInstances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) + healthyInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) zoneFailures := make(map[string]struct{}) now := time.Now() @@ -438,7 +438,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro // enabled (data is replicated to RF different zones), there's no benefit in // querying healthy instances from "failing zones". A zone is considered // failed if there is single error. 
- filteredInstances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) + filteredInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) for _, instance := range healthyInstances { if _, ok := zoneFailures[instance.Zone]; !ok { filteredInstances = append(filteredInstances, instance) @@ -648,7 +648,7 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur actualZones = []string{""} } - shard := make(map[string]IngesterDesc, size) + shard := make(map[string]InstanceDesc, size) // We need to iterate zones always in the same order to guarantee stability. for _, zone := range actualZones { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go index 30e1646acf5c..7bfadacad7b8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go @@ -60,7 +60,7 @@ func (IngesterState) EnumDescriptor() ([]byte, []int) { } type Desc struct { - Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Ingesters map[string]InstanceDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *Desc) Reset() { *m = Desc{} } @@ -95,14 +95,14 @@ func (m *Desc) XXX_DiscardUnknown() { var xxx_messageInfo_Desc proto.InternalMessageInfo -func (m *Desc) GetIngesters() map[string]IngesterDesc { +func (m *Desc) GetIngesters() map[string]InstanceDesc { if m != nil { return m.Ingesters } return nil } -type IngesterDesc struct { +type InstanceDesc struct { Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` // Unix timestamp (with seconds precision) of the last heartbeat sent // by this instance. 
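Since `ring.pb.go` below is generated code, the practical impact of the rename is easiest to see from the hand-written API in `model.go` above: `NewDesc()` still returns a `*Desc` whose `Ingesters` map now holds `InstanceDesc` values, and `AddIngester` keeps its parameters, changing only its return type. A small usage sketch (the instance id, address, zone and tokens are placeholder values):

```go
package example

import (
	"time"

	"github.com/cortexproject/cortex/pkg/ring"
)

// buildTestRing shows the renamed API end to end: AddIngester now returns a
// ring.InstanceDesc, and Desc.Ingesters is a map[string]ring.InstanceDesc.
// All concrete values here are placeholders for illustration.
func buildTestRing() *ring.Desc {
	d := ring.NewDesc()
	d.AddIngester("ingester-1", "1.2.3.4:9095", "zone-a", []uint32{1, 128, 256}, ring.ACTIVE, time.Now())
	return d
}
```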
@@ -126,17 +126,17 @@ type IngesterDesc struct { RegisteredTimestamp int64 `protobuf:"varint,8,opt,name=registered_timestamp,json=registeredTimestamp,proto3" json:"registered_timestamp,omitempty"` } -func (m *IngesterDesc) Reset() { *m = IngesterDesc{} } -func (*IngesterDesc) ProtoMessage() {} -func (*IngesterDesc) Descriptor() ([]byte, []int) { +func (m *InstanceDesc) Reset() { *m = InstanceDesc{} } +func (*InstanceDesc) ProtoMessage() {} +func (*InstanceDesc) Descriptor() ([]byte, []int) { return fileDescriptor_26381ed67e202a6e, []int{1} } -func (m *IngesterDesc) XXX_Unmarshal(b []byte) error { +func (m *InstanceDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *InstanceDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_IngesterDesc.Marshal(b, m, deterministic) + return xxx_messageInfo_InstanceDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -146,54 +146,54 @@ func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *IngesterDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngesterDesc.Merge(m, src) +func (m *InstanceDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstanceDesc.Merge(m, src) } -func (m *IngesterDesc) XXX_Size() int { +func (m *InstanceDesc) XXX_Size() int { return m.Size() } -func (m *IngesterDesc) XXX_DiscardUnknown() { - xxx_messageInfo_IngesterDesc.DiscardUnknown(m) +func (m *InstanceDesc) XXX_DiscardUnknown() { + xxx_messageInfo_InstanceDesc.DiscardUnknown(m) } -var xxx_messageInfo_IngesterDesc proto.InternalMessageInfo +var xxx_messageInfo_InstanceDesc proto.InternalMessageInfo -func (m *IngesterDesc) GetAddr() string { +func (m *InstanceDesc) GetAddr() string { if m != nil { return m.Addr } return "" } -func (m *IngesterDesc) GetTimestamp() int64 { +func (m *InstanceDesc) GetTimestamp() int64 { if m != nil { return m.Timestamp } return 0 } -func (m *IngesterDesc) GetState() IngesterState { +func (m *InstanceDesc) GetState() IngesterState { if m != nil { return m.State } return ACTIVE } -func (m *IngesterDesc) GetTokens() []uint32 { +func (m *InstanceDesc) GetTokens() []uint32 { if m != nil { return m.Tokens } return nil } -func (m *IngesterDesc) GetZone() string { +func (m *InstanceDesc) GetZone() string { if m != nil { return m.Zone } return "" } -func (m *IngesterDesc) GetRegisteredTimestamp() int64 { +func (m *InstanceDesc) GetRegisteredTimestamp() int64 { if m != nil { return m.RegisteredTimestamp } @@ -203,41 +203,41 @@ func (m *IngesterDesc) GetRegisteredTimestamp() int64 { func init() { proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value) proto.RegisterType((*Desc)(nil), "ring.Desc") - proto.RegisterMapType((map[string]IngesterDesc)(nil), "ring.Desc.IngestersEntry") - proto.RegisterType((*IngesterDesc)(nil), "ring.IngesterDesc") + proto.RegisterMapType((map[string]InstanceDesc)(nil), "ring.Desc.IngestersEntry") + proto.RegisterType((*InstanceDesc)(nil), "ring.InstanceDesc") } func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } var fileDescriptor_26381ed67e202a6e = []byte{ - // 421 bytes of a gzipped FileDescriptorProto + // 427 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x31, 0x6f, 0xd3, 0x40, 0x1c, 0xc5, 0xef, 0x1f, 0x5f, 0x5c, 0xe7, 0x1f, 0x5a, 0x59, 0x57, 
0x84, 0x4c, 0x85, 0x0e, 0xab, 0x93, 0x41, 0xc2, 0x15, 0x81, 0x01, 0x21, 0x31, 0xb4, 0xd4, 0x20, 0x5b, 0x51, 0xa8, 0x4c, 0xd4, - 0x15, 0x39, 0xcd, 0x61, 0xac, 0x12, 0xbb, 0xb2, 0x2f, 0x48, 0x65, 0xe2, 0x23, 0xf0, 0x05, 0xd8, - 0xf9, 0x28, 0x1d, 0x33, 0xa1, 0x4e, 0x88, 0x38, 0x0b, 0x63, 0x3f, 0x02, 0xba, 0x73, 0x23, 0x93, - 0xed, 0xfd, 0xfc, 0xde, 0xff, 0x3d, 0x0f, 0x87, 0x58, 0x66, 0x79, 0xea, 0x5f, 0x94, 0x85, 0x2c, - 0x18, 0x55, 0x7a, 0xef, 0x49, 0x9a, 0xc9, 0x4f, 0xf3, 0x89, 0x7f, 0x56, 0xcc, 0x0e, 0xd2, 0x22, - 0x2d, 0x0e, 0xb4, 0x39, 0x99, 0x7f, 0xd4, 0xa4, 0x41, 0xab, 0xe6, 0x68, 0xff, 0x07, 0x20, 0x3d, - 0x16, 0xd5, 0x19, 0x7b, 0x85, 0xbd, 0x2c, 0x4f, 0x45, 0x25, 0x45, 0x59, 0x39, 0xe0, 0x1a, 0x5e, - 0x7f, 0x70, 0xdf, 0xd7, 0xed, 0xca, 0xf6, 0xc3, 0xb5, 0x17, 0xe4, 0xb2, 0xbc, 0x3c, 0xa2, 0x57, - 0xbf, 0x1f, 0x92, 0xb8, 0xbd, 0xd8, 0x3b, 0xc1, 0x9d, 0xcd, 0x08, 0xb3, 0xd1, 0x38, 0x17, 0x97, - 0x0e, 0xb8, 0xe0, 0xf5, 0x62, 0x25, 0x99, 0x87, 0xdd, 0x2f, 0xc9, 0xe7, 0xb9, 0x70, 0x3a, 0x2e, - 0x78, 0xfd, 0x01, 0x6b, 0xea, 0xd7, 0x67, 0x6a, 0x26, 0x6e, 0x02, 0x2f, 0x3b, 0x2f, 0x20, 0xa2, - 0x56, 0xc7, 0x36, 0xf6, 0x7f, 0x01, 0xde, 0xf9, 0x3f, 0xc1, 0x18, 0xd2, 0x64, 0x3a, 0x2d, 0x6f, - 0x7b, 0xb5, 0x66, 0x0f, 0xb0, 0x27, 0xb3, 0x99, 0xa8, 0x64, 0x32, 0xbb, 0xd0, 0xe5, 0x46, 0xdc, - 0x7e, 0x60, 0x8f, 0xb0, 0x5b, 0xc9, 0x44, 0x0a, 0xc7, 0x70, 0xc1, 0xdb, 0x19, 0xec, 0x6e, 0xce, - 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71, 0x2e, 0xf2, 0xca, 0x31, 0x5d, 0xc3, - 0xdb, 0x8e, 0x6f, 0x49, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c, 0x35, 0xa3, 0x4a, 0xb3, 0xa7, 0x78, - 0xb7, 0x14, 0x69, 0xa6, 0x3a, 0xc4, 0xf4, 0x43, 0xbb, 0x6f, 0xe9, 0xfd, 0xdd, 0xd6, 0x1b, 0xaf, - 0xad, 0x88, 0x5a, 0xd4, 0xee, 0x46, 0xd4, 0xea, 0xda, 0xe6, 0xe3, 0x21, 0x6e, 0x6f, 0xfc, 0x02, - 0x43, 0x34, 0x0f, 0x5f, 0x8f, 0xc3, 0xd3, 0xc0, 0x26, 0xac, 0x8f, 0x5b, 0xc3, 0xe0, 0xf0, 0x34, - 0x1c, 0xbd, 0xb5, 0x41, 0xc1, 0x49, 0x30, 0x3a, 0x56, 0xd0, 0x51, 0x10, 0xbd, 0x0b, 0x47, 0x0a, - 0x0c, 0x66, 0x21, 0x1d, 0x06, 0x6f, 0xc6, 0x36, 0x3d, 0x7a, 0xbe, 0x58, 0x72, 0x72, 0xbd, 0xe4, - 0xe4, 0x66, 0xc9, 0xe1, 0x5b, 0xcd, 0xe1, 0x67, 0xcd, 0xe1, 0xaa, 0xe6, 0xb0, 0xa8, 0x39, 0xfc, - 0xa9, 0x39, 0xfc, 0xad, 0x39, 0xb9, 0xa9, 0x39, 0x7c, 0x5f, 0x71, 0xb2, 0x58, 0x71, 0x72, 0xbd, - 0xe2, 0x64, 0x62, 0xea, 0x37, 0xf0, 0xec, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x56, 0xd8, 0x87, - 0x71, 0x46, 0x02, 0x00, 0x00, + 0x15, 0x39, 0xc9, 0x61, 0xac, 0x12, 0xbb, 0xb2, 0x2f, 0x48, 0x65, 0xe2, 0x23, 0xf0, 0x05, 0xd8, + 0xf9, 0x28, 0x1d, 0x33, 0xa1, 0x4e, 0x88, 0x38, 0x0b, 0x63, 0x3f, 0x02, 0xba, 0x73, 0x23, 0xd3, + 0xed, 0xfd, 0xfc, 0xde, 0xbd, 0xf7, 0x1f, 0x8c, 0x58, 0x66, 0x79, 0xea, 0x9f, 0x97, 0x85, 0x2c, + 0x18, 0x55, 0x7a, 0xef, 0x49, 0x9a, 0xc9, 0x4f, 0x8b, 0x89, 0x3f, 0x2d, 0xe6, 0x07, 0x69, 0x91, + 0x16, 0x07, 0xda, 0x9c, 0x2c, 0x3e, 0x6a, 0xd2, 0xa0, 0x55, 0xf3, 0x68, 0xff, 0x07, 0x20, 0x3d, + 0x16, 0xd5, 0x94, 0xbd, 0xc2, 0x5e, 0x96, 0xa7, 0xa2, 0x92, 0xa2, 0xac, 0x1c, 0x70, 0x0d, 0xaf, + 0x3f, 0xb8, 0xef, 0xeb, 0x76, 0x65, 0xfb, 0xe1, 0xc6, 0x0b, 0x72, 0x59, 0x5e, 0x1c, 0xd1, 0xcb, + 0xdf, 0x0f, 0x49, 0xdc, 0xbe, 0xd8, 0x3b, 0xc1, 0x9d, 0xdb, 0x11, 0x66, 0xa3, 0x71, 0x26, 0x2e, + 0x1c, 0x70, 0xc1, 0xeb, 0xc5, 0x4a, 0x32, 0x0f, 0xbb, 0x5f, 0x92, 0xcf, 0x0b, 0xe1, 0x74, 0x5c, + 0xf0, 0xfa, 0x03, 0xd6, 0xd4, 0x87, 0x79, 0x25, 0x93, 0x7c, 0x2a, 0xd4, 0x4c, 0xdc, 0x04, 0x5e, + 0x76, 0x5e, 0x40, 0x44, 0xad, 0x8e, 0x6d, 0xec, 0xff, 0x02, 0xbc, 0xf3, 0x7f, 0x82, 0x31, 0xa4, + 0xc9, 0x6c, 0x56, 0xde, 0xf4, 0x6a, 0xcd, 0x1e, 0x60, 0x4f, 
0x66, 0x73, 0x51, 0xc9, 0x64, 0x7e, + 0xae, 0xcb, 0x8d, 0xb8, 0xfd, 0xc0, 0x1e, 0x61, 0xb7, 0x92, 0x89, 0x14, 0x8e, 0xe1, 0x82, 0xb7, + 0x33, 0xd8, 0xdd, 0xcc, 0x36, 0xd7, 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71, + 0x26, 0xf2, 0xca, 0x31, 0x5d, 0xc3, 0xdb, 0x8e, 0x6f, 0x48, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c, + 0x35, 0xa3, 0x4a, 0xb3, 0xa7, 0x78, 0xb7, 0x14, 0x69, 0xa6, 0x3a, 0xc4, 0xec, 0x43, 0xbb, 0x6f, + 0xe9, 0xfd, 0xdd, 0xd6, 0x1b, 0x6f, 0xac, 0x88, 0x5a, 0xd4, 0xee, 0x46, 0xd4, 0xea, 0xda, 0xe6, + 0xe3, 0x21, 0x6e, 0xdf, 0x3a, 0x81, 0x21, 0x9a, 0x87, 0xaf, 0xc7, 0xe1, 0x69, 0x60, 0x13, 0xd6, + 0xc7, 0xad, 0x61, 0x70, 0x78, 0x1a, 0x8e, 0xde, 0xda, 0xa0, 0xe0, 0x24, 0x18, 0x1d, 0x2b, 0xe8, + 0x28, 0x88, 0xde, 0x85, 0x23, 0x05, 0x06, 0xb3, 0x90, 0x0e, 0x83, 0x37, 0x63, 0x9b, 0x1e, 0x3d, + 0x5f, 0xae, 0x38, 0xb9, 0x5a, 0x71, 0x72, 0xbd, 0xe2, 0xf0, 0xad, 0xe6, 0xf0, 0xb3, 0xe6, 0x70, + 0x59, 0x73, 0x58, 0xd6, 0x1c, 0xfe, 0xd4, 0x1c, 0xfe, 0xd6, 0x9c, 0x5c, 0xd7, 0x1c, 0xbe, 0xaf, + 0x39, 0x59, 0xae, 0x39, 0xb9, 0x5a, 0x73, 0x32, 0x31, 0xf5, 0x3f, 0xf0, 0xec, 0x5f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x79, 0x5b, 0xe1, 0x8b, 0x46, 0x02, 0x00, 0x00, } func (x IngesterState) String() string { @@ -278,14 +278,14 @@ func (this *Desc) Equal(that interface{}) bool { } return true } -func (this *IngesterDesc) Equal(that interface{}) bool { +func (this *InstanceDesc) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*IngesterDesc) + that1, ok := that.(*InstanceDesc) if !ok { - that2, ok := that.(IngesterDesc) + that2, ok := that.(InstanceDesc) if ok { that1 = &that2 } else { @@ -333,7 +333,7 @@ func (this *Desc) GoString() string { keysForIngesters = append(keysForIngesters, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForIngesters) - mapStringForIngesters := "map[string]IngesterDesc{" + mapStringForIngesters := "map[string]InstanceDesc{" for _, k := range keysForIngesters { mapStringForIngesters += fmt.Sprintf("%#v: %#v,", k, this.Ingesters[k]) } @@ -344,12 +344,12 @@ func (this *Desc) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *IngesterDesc) GoString() string { +func (this *InstanceDesc) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 10) - s = append(s, "&ring.IngesterDesc{") + s = append(s, "&ring.InstanceDesc{") s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n") s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") @@ -414,7 +414,7 @@ func (m *Desc) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngesterDesc) Marshal() (dAtA []byte, err error) { +func (m *InstanceDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -424,12 +424,12 @@ func (m *IngesterDesc) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngesterDesc) MarshalTo(dAtA []byte) (int, error) { +func (m *InstanceDesc) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngesterDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InstanceDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -513,7 +513,7 @@ func (m *Desc) Size() (n int) { return n } -func (m *IngesterDesc) Size() (n int) { +func (m *InstanceDesc) Size() (n int) { if m == nil { return 0 } @@ -561,7 +561,7 @@ func 
(this *Desc) String() string { keysForIngesters = append(keysForIngesters, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForIngesters) - mapStringForIngesters := "map[string]IngesterDesc{" + mapStringForIngesters := "map[string]InstanceDesc{" for _, k := range keysForIngesters { mapStringForIngesters += fmt.Sprintf("%v: %v,", k, this.Ingesters[k]) } @@ -572,11 +572,11 @@ func (this *Desc) String() string { }, "") return s } -func (this *IngesterDesc) String() string { +func (this *InstanceDesc) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngesterDesc{`, + s := strings.Join([]string{`&InstanceDesc{`, `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, @@ -654,10 +654,10 @@ func (m *Desc) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Ingesters == nil { - m.Ingesters = make(map[string]IngesterDesc) + m.Ingesters = make(map[string]InstanceDesc) } var mapkey string - mapvalue := &IngesterDesc{} + mapvalue := &InstanceDesc{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -731,7 +731,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &IngesterDesc{} + mapvalue = &InstanceDesc{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -777,7 +777,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngesterDesc) Unmarshal(dAtA []byte) error { +func (m *InstanceDesc) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -800,10 +800,10 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngesterDesc: wiretype end group for non-group") + return fmt.Errorf("proto: InstanceDesc: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngesterDesc: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InstanceDesc: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto index 2adc91a806c3..4eab6f733ccf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto @@ -8,11 +8,11 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; message Desc { - map ingesters = 1 [(gogoproto.nullable) = false]; + map ingesters = 1 [(gogoproto.nullable) = false]; reserved 2; } -message IngesterDesc { +message InstanceDesc { reserved 4, 5; // old, deprecated fields string addr = 1; diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go index 921900c2dc1a..b1cf8210c75c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go @@ -122,8 +122,8 @@ func WaitRingStability(ctx context.Context, r *Ring, op Operation, minStability, } // MakeBuffersForGet returns buffers to use with Ring.Get(). 
-func MakeBuffersForGet() (bufDescs []IngesterDesc, bufHosts, bufZones []string) { - bufDescs = make([]IngesterDesc, 0, GetBufferSize) +func MakeBuffersForGet() (bufDescs []InstanceDesc, bufHosts, bufZones []string) { + bufDescs = make([]InstanceDesc, 0, GetBufferSize) bufHosts = make([]string, 0, GetBufferSize) bufZones = make([]string, 0, GetBufferSize) return diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go index 47a5be6a07dd..62f83fa8313b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go @@ -4,7 +4,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" ) -func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) { +func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) { // When we initialize the ruler instance in the ring we want to start from // a clean situation, so whatever is the state we set it ACTIVE, while we keep existing // tokens (if any). @@ -24,5 +24,5 @@ func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.De func (r *Ruler) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} func (r *Ruler) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (r *Ruler) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) { +func (r *Ruler) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go index 9bee7d46da35..b2a1a0e55182 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go @@ -171,8 +171,7 @@ func (r *DefaultMultiTenantManager) newManager(ctx context.Context, userID strin reg := prometheus.NewRegistry() r.userManagerMetrics.AddUserRegistry(userID, reg) - logger := log.With(r.logger, "user", userID) - return r.managerFactory(ctx, userID, notifier, logger, reg), nil + return r.managerFactory(ctx, userID, notifier, r.logger, reg), nil } func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string) (*notifier.Manager, error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go index 8d78ab0e3150..5f0f60925717 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go @@ -2,6 +2,7 @@ package ruler import ( "context" + "flag" "fmt" "net/url" "regexp" @@ -16,8 +17,21 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/dns" "github.com/prometheus/prometheus/notifier" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/tls" ) +type NotifierConfig struct { + TLS tls.ClientConfig `yaml:",inline"` + BasicAuth util.BasicAuth `yaml:",inline"` +} + +func (cfg *NotifierConfig) RegisterFlags(f *flag.FlagSet) { + cfg.TLS.RegisterFlagsWithPrefix("ruler.alertmanager-client", f) + cfg.BasicAuth.RegisterFlagsWithPrefix("ruler.alertmanager-client.", f) +} + // rulerNotifier 
bundles a notifier.Manager together with an associated // Alertmanager service discovery manager and handles the lifecycle // of both actors. @@ -150,13 +164,21 @@ func amConfigFromURL(rulerConfig *Config, url *url.URL, apiVersion config.Alertm PathPrefix: url.Path, Timeout: model.Duration(rulerConfig.NotificationTimeout), ServiceDiscoveryConfigs: sdConfig, + HTTPClientConfig: config_util.HTTPClientConfig{ + TLSConfig: config_util.TLSConfig{ + CAFile: rulerConfig.Notifier.TLS.CAPath, + CertFile: rulerConfig.Notifier.TLS.CertPath, + KeyFile: rulerConfig.Notifier.TLS.KeyPath, + InsecureSkipVerify: rulerConfig.Notifier.TLS.InsecureSkipVerify, + ServerName: rulerConfig.Notifier.TLS.ServerName, + }, + }, } + // Check the URL for basic authentication information first if url.User != nil { - amConfig.HTTPClientConfig = config_util.HTTPClientConfig{ - BasicAuth: &config_util.BasicAuth{ - Username: url.User.Username(), - }, + amConfig.HTTPClientConfig.BasicAuth = &config_util.BasicAuth{ + Username: url.User.Username(), } if password, isSet := url.User.Password(); isSet { @@ -164,5 +186,13 @@ func amConfigFromURL(rulerConfig *Config, url *url.URL, apiVersion config.Alertm } } + // Override URL basic authentication configs with hard coded config values if present + if rulerConfig.Notifier.BasicAuth.IsEnabled() { + amConfig.HTTPClientConfig.BasicAuth = &config_util.BasicAuth{ + Username: rulerConfig.Notifier.BasicAuth.Username, + Password: config_util.Secret(rulerConfig.Notifier.BasicAuth.Password), + } + } + return amConfig } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go index d5ac2fe70d2b..6b4433d3acee 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go @@ -86,6 +86,8 @@ type Config struct { NotificationQueueCapacity int `yaml:"notification_queue_capacity"` // HTTP timeout duration when sending notifications to the Alertmanager. NotificationTimeout time.Duration `yaml:"notification_timeout"` + // Client configs for interacting with the Alertmanager + Notifier NotifierConfig `yaml:"alertmanager_client"` // Max time to tolerate outage for restoring "for" state of alert. 
OutageTolerance time.Duration `yaml:"for_outage_tolerance"` @@ -130,6 +132,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.ClientTLSConfig.RegisterFlagsWithPrefix("ruler.client", f) cfg.StoreConfig.RegisterFlags(f) cfg.Ring.RegisterFlags(f) + cfg.Notifier.RegisterFlags(f) // Deprecated Flags that will be maintained to avoid user disruption flagext.DeprecatedFlag(f, "ruler.client-timeout", "This flag has been renamed to ruler.configs.client-timeout") diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go index 3bd18e9a247d..2ea58e7cfbd6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go @@ -10,8 +10,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -54,7 +54,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go index 1fb7d65586c1..344c6d8f029b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go @@ -14,7 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/ruler/rules" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Object Rule Storage Schema @@ -49,7 +49,7 @@ func NewRuleStore(client chunk.ObjectClient, loadConcurrency int) *RuleStore { func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string, rg *rules.RuleGroupDesc) (*rules.RuleGroupDesc, error) { reader, err := o.client.GetObject(ctx, objectKey) if err == chunk.ErrStorageObjectNotFound { - level.Debug(util.Logger).Log("msg", "rule group does not exist", "name", objectKey) + level.Debug(util_log.Logger).Log("msg", "rule group does not exist", "name", objectKey) return nil, rules.ErrGroupNotFound } @@ -139,10 +139,10 @@ func (o *RuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string] key := generateRuleObjectKey(user, namespace, group) - level.Debug(util.Logger).Log("msg", "loading rule group", "key", key, "user", user) + level.Debug(util_log.Logger).Log("msg", "loading rule group", "key", key, "user", user) gr, err := o.getRuleGroup(gCtx, key, gr) // reuse group pointer from the map. 
if err != nil { - level.Error(util.Logger).Log("msg", "failed to get rule group", "key", key, "user", user) + level.Error(util_log.Logger).Log("msg", "failed to get rule group", "key", key, "user", user) return err } @@ -227,10 +227,10 @@ func (o *RuleStore) DeleteNamespace(ctx context.Context, userID, namespace strin } for _, obj := range ruleGroupObjects { - level.Debug(util.Logger).Log("msg", "deleting rule group", "namespace", namespace, "key", obj.Key) + level.Debug(util_log.Logger).Log("msg", "deleting rule group", "namespace", namespace, "key", obj.Key) err = o.client.DeleteObject(ctx, obj.Key) if err != nil { - level.Error(util.Logger).Log("msg", "unable to delete rule group from namespace", "err", err, "namespace", namespace, "key", obj.Key) + level.Error(util_log.Logger).Log("msg", "unable to delete rule group from namespace", "err", err, "namespace", namespace, "key", obj.Key) return err } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go index 51dab86036f7..2f17bbe737ee 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go @@ -28,6 +28,11 @@ func newS3Config(cfg Config) s3.Config { IdleConnTimeout: model.Duration(cfg.HTTP.IdleConnTimeout), ResponseHeaderTimeout: model.Duration(cfg.HTTP.ResponseHeaderTimeout), InsecureSkipVerify: cfg.HTTP.InsecureSkipVerify, + TLSHandshakeTimeout: model.Duration(cfg.HTTP.TLSHandshakeTimeout), + ExpectContinueTimeout: model.Duration(cfg.HTTP.ExpectContinueTimeout), + MaxIdleConns: cfg.HTTP.MaxIdleConns, + MaxIdleConnsPerHost: cfg.HTTP.MaxIdleConnsPerHost, + MaxConnsPerHost: cfg.HTTP.MaxConnsPerHost, Transport: cfg.HTTP.Transport, }, // Enforce signature version 2 if CLI flag is set diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go index 96db7e1f0c12..17d2f77f7da3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go @@ -27,6 +27,11 @@ type HTTPConfig struct { IdleConnTimeout time.Duration `yaml:"idle_conn_timeout"` ResponseHeaderTimeout time.Duration `yaml:"response_header_timeout"` InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + TLSHandshakeTimeout time.Duration `yaml:"tls_handshake_timeout"` + ExpectContinueTimeout time.Duration `yaml:"expect_continue_timeout"` + MaxIdleConns int `yaml:"max_idle_connections"` + MaxIdleConnsPerHost int `yaml:"max_idle_connections_per_host"` + MaxConnsPerHost int `yaml:"max_connections_per_host"` // Allow upstream callers to inject a round tripper Transport http.RoundTripper `yaml:"-"` @@ -37,6 +42,11 @@ func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.DurationVar(&cfg.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.") f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a servers response headers.") f.BoolVar(&cfg.InsecureSkipVerify, prefix+"s3.http.insecure-skip-verify", false, "If the client connects to S3 via HTTPS and this option is enabled, the client will accept any certificate and hostname.") + f.DurationVar(&cfg.TLSHandshakeTimeout, 
prefix+"s3.tls-handshake-timeout", 10*time.Second, "Maximum time to wait for a TLS handshake. 0 means no limit.") + f.DurationVar(&cfg.ExpectContinueTimeout, prefix+"s3.expect-continue-timeout", 1*time.Second, "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.") + f.IntVar(&cfg.MaxIdleConns, prefix+"s3.max-idle-connections", 100, "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.") + f.IntVar(&cfg.MaxIdleConnsPerHost, prefix+"s3.max-idle-connections-per-host", 100, "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.") + f.IntVar(&cfg.MaxConnsPerHost, prefix+"s3.max-connections-per-host", 0, "Maximum number of connections per host. 0 means no limit.") } // Config holds the config options for an S3 backend diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go index 0724fd6d160f..11c635cd7d14 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -227,10 +227,9 @@ type BucketStoreConfig struct { IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` BucketIndex BucketIndexConfig `yaml:"bucket_index"` - // Controls whether index-header lazy loading is enabled. This config option is hidden - // while it is marked as experimental. - IndexHeaderLazyLoadingEnabled bool `yaml:"index_header_lazy_loading_enabled" doc:"hidden"` - IndexHeaderLazyLoadingIdleTimeout time.Duration `yaml:"index_header_lazy_loading_idle_timeout" doc:"hidden"` + // Controls whether index-header lazy loading is enabled. + IndexHeaderLazyLoadingEnabled bool `yaml:"index_header_lazy_loading_enabled"` + IndexHeaderLazyLoadingIdleTimeout time.Duration `yaml:"index_header_lazy_loading_idle_timeout"` // Controls what is the ratio of postings offsets store will hold in memory. // Larger value will keep less offsets, which will increase CPU cycles needed for query touching those postings. diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go index a971aee7a55a..5253b1f66285 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go @@ -11,7 +11,7 @@ import ( "github.com/pkg/errors" "github.com/thanos-io/thanos/pkg/objstore" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Relative to user-specific prefix. @@ -65,7 +65,7 @@ func ReadTenantDeletionMark(ctx context.Context, bkt objstore.BucketReader, user // Close reader before dealing with decode error. 
if closeErr := r.Close(); closeErr != nil { - level.Warn(util.Logger).Log("msg", "failed to close bucket reader", "err", closeErr) + level.Warn(util_log.Logger).Log("msg", "failed to close bucket reader", "err", closeErr) } if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go index 182f5cefb69b..db7716a5b591 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go @@ -330,7 +330,7 @@ func (g *StoreGateway) LabelValues(ctx context.Context, req *storepb.LabelValues return g.stores.LabelValues(ctx, req) } -func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) { +func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) { // When we initialize the store-gateway instance in the ring we want to start from // a clean situation, so whatever is the state we set it JOINING, while we keep existing // tokens (if any) or the ones loaded from file. @@ -350,7 +350,7 @@ func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc func (g *StoreGateway) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} func (g *StoreGateway) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) { +func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { } func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go index 01d466b4558a..22ce3ebea643 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -32,7 +32,7 @@ func writeMessage(w http.ResponseWriter, message string) { }{Message: message}) if err != nil { - level.Error(util.Logger).Log("msg", "unable to serve store gateway ring page", "err", err) + level.Error(util_log.Logger).Log("msg", "unable to serve store gateway ring page", "err", err) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go index e977c84ca763..8f7f30bc87d7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go @@ -10,8 +10,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -72,7 +72,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f 
*flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/http.go b/vendor/github.com/cortexproject/cortex/pkg/util/http.go index f02da30b8d1d..f06363e537f0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/http.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "flag" "fmt" "html/template" "io" @@ -19,6 +20,22 @@ import ( const messageSizeLargerErrFmt = "received message larger than max (%d vs %d)" +// BasicAuth configures basic authentication for HTTP clients. +type BasicAuth struct { + Username string `yaml:"basic_auth_username"` + Password string `yaml:"basic_auth_password"` +} + +func (b *BasicAuth) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&b.Username, prefix+"basic-auth-username", "", "HTTP Basic authentication username. It overrides the username set in the URL (if any).") + f.StringVar(&b.Password, prefix+"basic-auth-password", "", "HTTP Basic authentication password. It overrides the password set in the URL (if any).") +} + +// IsEnabled returns false if basic authentication isn't enabled. +func (b BasicAuth) IsEnabled() bool { + return b.Username != "" || b.Password != "" +} + // WriteJSONResponse writes some JSON as a HTTP response. func WriteJSONResponse(w http.ResponseWriter, v interface{}) { w.Header().Set("Content-Type", "application/json") diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log.go deleted file mode 100644 index df0d464d4570..000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/util/log.go +++ /dev/null @@ -1,96 +0,0 @@ -package util - -import ( - "os" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/logging" - "github.com/weaveworks/common/server" - - logutil "github.com/cortexproject/cortex/pkg/util/log" -) - -var ( - // Logger is a shared go-kit logger. - // TODO: Change all components to take a non-global logger via their constructors. - // Deprecated and moved to `pkg/util/log`. Prefer accepting a non-global logger as an argument. - Logger = logutil.Logger - - logMessages = prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "log_messages_total", - Help: "Total number of log messages.", - }, []string{"level"}) - - supportedLevels = []level.Value{ - level.DebugValue(), - level.InfoValue(), - level.WarnValue(), - level.ErrorValue(), - } -) - -func init() { - prometheus.MustRegister(logMessages) -} - -// InitLogger initialises the global gokit logger (util.Logger) and overrides the -// default logger for the server. -func InitLogger(cfg *server.Config) { - l, err := NewPrometheusLogger(cfg.LogLevel, cfg.LogFormat) - if err != nil { - panic(err) - } - - // when use util.Logger, skip 3 stack frames. - Logger = log.With(l, "caller", log.Caller(3)) - - // cfg.Log wraps log function, skip 4 stack frames to get caller information. - // this works in go 1.12, but doesn't work in versions earlier. - // it will always shows the wrapper function generated by compiler - // marked in old versions. 
- cfg.Log = logging.GoKit(log.With(l, "caller", log.Caller(4))) -} - -// PrometheusLogger exposes Prometheus counters for each of go-kit's log levels. -type PrometheusLogger struct { - logger log.Logger -} - -// NewPrometheusLogger creates a new instance of PrometheusLogger which exposes -// Prometheus counters for various log levels. -func NewPrometheusLogger(l logging.Level, format logging.Format) (log.Logger, error) { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - if format.String() == "json" { - logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr)) - } - logger = level.NewFilter(logger, l.Gokit) - - // Initialise counters for all supported levels: - for _, level := range supportedLevels { - logMessages.WithLabelValues(level.String()) - } - - logger = &PrometheusLogger{ - logger: logger, - } - - // return a Logger without caller information, shouldn't use directly - logger = log.With(logger, "ts", log.DefaultTimestampUTC) - return logger, nil -} - -// Log increments the appropriate Prometheus counter depending on the log level. -func (pl *PrometheusLogger) Log(kv ...interface{}) error { - pl.logger.Log(kv...) - l := "unknown" - for i := 1; i < len(kv); i += 2 { - if v, ok := kv[i].(level.Value); ok { - l = v.String() - break - } - } - logMessages.WithLabelValues(l).Inc() - return nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go index 2f146db276c8..92ea3f697d18 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go @@ -7,6 +7,9 @@ import ( "github.com/go-kit/kit/log" kitlog "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaveworks/common/logging" + "github.com/weaveworks/common/server" ) var ( @@ -14,8 +17,84 @@ var ( // TODO: Change all components to take a non-global logger via their constructors. // Prefer accepting a non-global logger as an argument. Logger = kitlog.NewNopLogger() + + logMessages = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "log_messages_total", + Help: "Total number of log messages.", + }, []string{"level"}) + + supportedLevels = []level.Value{ + level.DebugValue(), + level.InfoValue(), + level.WarnValue(), + level.ErrorValue(), + } ) +func init() { + prometheus.MustRegister(logMessages) +} + +// InitLogger initialises the global gokit logger (util_log.Logger) and overrides the +// default logger for the server. +func InitLogger(cfg *server.Config) { + l, err := NewPrometheusLogger(cfg.LogLevel, cfg.LogFormat) + if err != nil { + panic(err) + } + + // when use util_log.Logger, skip 3 stack frames. + Logger = log.With(l, "caller", log.Caller(3)) + + // cfg.Log wraps log function, skip 4 stack frames to get caller information. + // this works in go 1.12, but doesn't work in versions earlier. + // it will always shows the wrapper function generated by compiler + // marked in old versions. + cfg.Log = logging.GoKit(log.With(l, "caller", log.Caller(4))) +} + +// PrometheusLogger exposes Prometheus counters for each of go-kit's log levels. +type PrometheusLogger struct { + logger log.Logger +} + +// NewPrometheusLogger creates a new instance of PrometheusLogger which exposes +// Prometheus counters for various log levels. 
+func NewPrometheusLogger(l logging.Level, format logging.Format) (log.Logger, error) { + logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + if format.String() == "json" { + logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr)) + } + logger = level.NewFilter(logger, l.Gokit) + + // Initialise counters for all supported levels: + for _, level := range supportedLevels { + logMessages.WithLabelValues(level.String()) + } + + logger = &PrometheusLogger{ + logger: logger, + } + + // return a Logger without caller information, shouldn't use directly + logger = log.With(logger, "ts", log.DefaultTimestampUTC) + return logger, nil +} + +// Log increments the appropriate Prometheus counter depending on the log level. +func (pl *PrometheusLogger) Log(kv ...interface{}) error { + pl.logger.Log(kv...) + l := "unknown" + for i := 1; i < len(kv); i += 2 { + if v, ok := kv[i].(level.Value); ok { + l = v.String() + break + } + } + logMessages.WithLabelValues(l).Inc() + return nil +} + // CheckFatal prints an error and exits with error code 1 if err is non-nil func CheckFatal(location string, err error) { if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go index e90b21dd18e7..be413ee236dd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go @@ -11,6 +11,8 @@ import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/prometheus/pkg/labels" + + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Data for single value (counter/gauge) with labels. @@ -583,7 +585,7 @@ func (r *UserRegistries) RemoveUserRegistry(user string, hard bool) { func (r *UserRegistries) softRemoveUserRegistry(ur *UserRegistry) bool { last, err := ur.reg.Gather() if err != nil { - level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) return false } @@ -605,7 +607,7 @@ func (r *UserRegistries) softRemoveUserRegistry(ur *UserRegistry) bool { ur.lastGather, err = NewMetricFamilyMap(last) if err != nil { - level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) return false } @@ -656,7 +658,7 @@ func (r *UserRegistries) BuildMetricFamiliesPerUser() MetricFamiliesPerUser { } if err != nil { - level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", entry.user, "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", entry.user, "err", err) continue } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go b/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go index be12157ac266..0d4fb43d1f2b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go @@ -7,6 +7,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/pkg/errors" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -45,7 +46,7 @@ func (w *moduleService) start(serviceContext context.Context) error { continue } - 
level.Debug(Logger).Log("msg", "module waiting for initialization", "module", w.name, "waiting_for", m) + level.Debug(util_log.Logger).Log("msg", "module waiting for initialization", "module", w.name, "waiting_for", m) err := s.AwaitRunning(serviceContext) if err != nil { @@ -55,7 +56,7 @@ func (w *moduleService) start(serviceContext context.Context) error { // we don't want to let this service to stop until all dependant services are stopped, // so we use independent context here - level.Info(Logger).Log("msg", "initialising", "module", w.name) + level.Info(util_log.Logger).Log("msg", "initialising", "module", w.name) err := w.service.StartAsync(context.Background()) if err != nil { return errors.Wrapf(err, "error starting module: %s", w.name) @@ -77,7 +78,7 @@ func (w *moduleService) stop(_ error) error { // Only wait for other modules, if underlying service is still running. w.waitForModulesToStop() - level.Debug(Logger).Log("msg", "stopping", "module", w.name) + level.Debug(util_log.Logger).Log("msg", "stopping", "module", w.name) err = services.StopAndAwaitTerminated(context.Background(), w.service) } else { @@ -85,9 +86,9 @@ func (w *moduleService) stop(_ error) error { } if err != nil && err != ErrStopProcess { - level.Warn(Logger).Log("msg", "module failed with error", "module", w.name, "err", err) + level.Warn(util_log.Logger).Log("msg", "module failed with error", "module", w.name, "err", err) } else { - level.Info(Logger).Log("msg", "module stopped", "module", w.name) + level.Info(util_log.Logger).Log("msg", "module stopped", "module", w.name) } return err } @@ -100,7 +101,7 @@ func (w *moduleService) waitForModulesToStop() { continue } - level.Debug(Logger).Log("msg", "module waiting for", "module", w.name, "waiting_for", n) + level.Debug(util_log.Logger).Log("msg", "module waiting for", "module", w.name, "waiting_for", n) // Passed context isn't canceled, so we can only get error here, if service // fails. But we don't care *how* service stops, as long as it is done. _ = s.AwaitTerminated(context.Background()) diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/net.go b/vendor/github.com/cortexproject/cortex/pkg/util/net.go index e0fa12e6ffaa..f4cd184870fc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/net.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/net.go @@ -5,6 +5,8 @@ import ( "net" "github.com/go-kit/kit/log/level" + + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // GetFirstAddressOf returns the first IPv4 address of the supplied interface names. 
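Most of the remaining hunks repeat one mechanical pattern: the shared go-kit logger moved from Cortex's pkg/util package into pkg/util/log, so call sites swap the import path and qualifier while the logging calls themselves stay untouched. A minimal sketch of the pattern (the function name reportStartup is illustrative):

```go
package example

import (
	"github.com/go-kit/kit/log/level"

	// Previously: "github.com/cortexproject/cortex/pkg/util", used as util.Logger.
	util_log "github.com/cortexproject/cortex/pkg/util/log"
)

// reportStartup logs through the shared global logger. Only the import and the
// package qualifier change; level, message and key-value pairs are unchanged.
func reportStartup(component string) {
	level.Info(util_log.Logger).Log("msg", "starting", "component", component)
}
```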
@@ -12,17 +14,17 @@ func GetFirstAddressOf(names []string) (string, error) { for _, name := range names { inf, err := net.InterfaceByName(name) if err != nil { - level.Warn(Logger).Log("msg", "error getting interface", "inf", name, "err", err) + level.Warn(util_log.Logger).Log("msg", "error getting interface", "inf", name, "err", err) continue } addrs, err := inf.Addrs() if err != nil { - level.Warn(Logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err) + level.Warn(util_log.Logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err) continue } if len(addrs) <= 0 { - level.Warn(Logger).Log("msg", "no addresses found for interface", "inf", name, "err", err) + level.Warn(util_log.Logger).Log("msg", "no addresses found for interface", "inf", name, "err", err) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go index 71df1e30e0d2..6447508cde14 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go @@ -16,7 +16,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -118,7 +118,7 @@ func (om *Manager) CloseListenerChannel(listener <-chan interface{}) { func (om *Manager) loop(ctx context.Context) error { if om.cfg.LoadPath == "" { - level.Info(util.Logger).Log("msg", "runtime config disabled: file not specified") + level.Info(util_log.Logger).Log("msg", "runtime config disabled: file not specified") <-ctx.Done() return nil } @@ -132,7 +132,7 @@ func (om *Manager) loop(ctx context.Context) error { err := om.loadConfig() if err != nil { // Log but don't stop on error - we don't want to halt all ingesters because of a typo - level.Error(util.Logger).Log("msg", "failed to load config", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to load config", "err", err) } case <-ctx.Done(): return nil diff --git a/vendor/github.com/cortexproject/cortex/tools/querytee/instrumentation.go b/vendor/github.com/cortexproject/cortex/tools/querytee/instrumentation.go index 8b6267786a77..1410ea748ccb 100644 --- a/vendor/github.com/cortexproject/cortex/tools/querytee/instrumentation.go +++ b/vendor/github.com/cortexproject/cortex/tools/querytee/instrumentation.go @@ -10,7 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) type InstrumentationServer struct { @@ -44,7 +44,7 @@ func (s *InstrumentationServer) Start() error { go func() { if err := s.srv.Serve(listener); err != nil { - level.Error(util.Logger).Log("msg", "metrics server terminated", "err", err) + level.Error(util_log.Logger).Log("msg", "metrics server terminated", "err", err) } }() diff --git a/vendor/github.com/cortexproject/cortex/tools/querytee/proxy_endpoint.go b/vendor/github.com/cortexproject/cortex/tools/querytee/proxy_endpoint.go index cde006481f7d..b9628b4da4f9 100644 --- a/vendor/github.com/cortexproject/cortex/tools/querytee/proxy_endpoint.go +++ b/vendor/github.com/cortexproject/cortex/tools/querytee/proxy_endpoint.go @@ -10,7 +10,7 @@ import ( 
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) type ResponsesComparator interface { @@ -127,7 +127,7 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back result := comparisonSuccess err := p.compareResponses(expectedResponse, actualResponse) if err != nil { - level.Error(util.Logger).Log("msg", "response comparison failed", "route-name", p.routeName, + level.Error(util_log.Logger).Log("msg", "response comparison failed", "route-name", p.routeName, "query", r.URL.RawQuery, "err", err) result = comparisonFailed } diff --git a/vendor/github.com/cortexproject/cortex/tools/querytee/response_comparator.go b/vendor/github.com/cortexproject/cortex/tools/querytee/response_comparator.go index 4da61cfcd53a..913ae57967e6 100644 --- a/vendor/github.com/cortexproject/cortex/tools/querytee/response_comparator.go +++ b/vendor/github.com/cortexproject/cortex/tools/querytee/response_comparator.go @@ -9,7 +9,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // SamplesComparatorFunc helps with comparing different types of samples coming from /api/v1/query and /api/v1/query_range routes. @@ -109,7 +109,7 @@ func compareMatrix(expectedRaw, actualRaw json.RawMessage, tolerance float64) er err := fmt.Errorf("expected %d samples for metric %s but got %d", expectedMetricLen, expectedMetric.Metric, actualMetricLen) if expectedMetricLen > 0 && actualMetricLen > 0 { - level.Error(util.Logger).Log("msg", err.Error(), "oldest-expected-ts", expectedMetric.Values[0].Timestamp, + level.Error(util_log.Logger).Log("msg", err.Error(), "oldest-expected-ts", expectedMetric.Values[0].Timestamp, "newest-expected-ts", expectedMetric.Values[expectedMetricLen-1].Timestamp, "oldest-actual-ts", actualMetric.Values[0].Timestamp, "newest-actual-ts", actualMetric.Values[actualMetricLen-1].Timestamp) } diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s index bfafa0ccfce8..7a3ead17eacf 100644 --- a/vendor/github.com/golang/snappy/decode_arm64.s +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -70,7 +70,7 @@ loop: // x := uint32(src[s] >> 2) // switch MOVW $60, R1 - ADD R4>>2, ZR, R4 + LSRW $2, R4, R4 CMPW R4, R1 BLS tagLit60Plus @@ -111,13 +111,12 @@ doLit: // is contiguous in memory and so it needs to leave enough source bytes to // read the next tag without refilling buffers, but Go's Decode assumes // contiguousness (the src argument is a []byte). - MOVD $16, R1 - CMP R1, R4 - BGT callMemmove - CMP R1, R2 - BLT callMemmove - CMP R1, R3 - BLT callMemmove + CMP $16, R4 + BGT callMemmove + CMP $16, R2 + BLT callMemmove + CMP $16, R3 + BLT callMemmove // !!! Implement the copy from src to dst as a 16-byte load and store. // (Decode's documentation says that dst and src must not overlap.) @@ -130,9 +129,8 @@ doLit: // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or // 16-byte loads and stores. This technique probably wouldn't be as // effective on architectures that are fussier about alignment. 
- - VLD1 0(R6), [V0.B16] - VST1 [V0.B16], 0(R7) + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) // d += length // s += length @@ -210,8 +208,7 @@ tagLit61: B doLit tagLit62Plus: - MOVW $62, R1 - CMPW R1, R4 + CMPW $62, R4 BHI tagLit63 // case x == 62: @@ -273,10 +270,9 @@ tagCopy: // We have a copy tag. We assume that: // - R3 == src[s] & 0x03 // - R4 == src[s] - MOVD $2, R1 - CMP R1, R3 - BEQ tagCopy2 - BGT tagCopy4 + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 // case tagCopy1: // s += 2 @@ -346,13 +342,11 @@ doCopy: // } // copy 16 bytes // d += length - MOVD $16, R1 - MOVD $8, R0 - CMP R1, R4 + CMP $16, R4 BGT slowForwardCopy - CMP R0, R5 + CMP $8, R5 BLT slowForwardCopy - CMP R1, R14 + CMP $16, R14 BLT slowForwardCopy MOVD 0(R15), R2 MOVD R2, 0(R7) @@ -426,8 +420,7 @@ makeOffsetAtLeast8: // // The two previous lines together means that d-offset, and therefore // // R15, is unchanged. // } - MOVD $8, R1 - CMP R1, R5 + CMP $8, R5 BGE fixUpSlowForwardCopy MOVD (R15), R3 MOVD R3, (R7) @@ -477,9 +470,7 @@ verySlowForwardCopy: ADD $1, R15, R15 ADD $1, R7, R7 SUB $1, R4, R4 - MOVD $0, R1 - CMP R1, R4 - BNE verySlowForwardCopy + CBNZ R4, verySlowForwardCopy B loop // The code above handles copy tags. diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s index 1f565ee75f2c..bf83667d711f 100644 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -35,11 +35,9 @@ TEXT ·emitLiteral(SB), NOSPLIT, $32-56 MOVW R3, R4 SUBW $1, R4, R4 - MOVW $60, R2 - CMPW R2, R4 + CMPW $60, R4 BLT oneByte - MOVW $256, R2 - CMPW R2, R4 + CMPW $256, R4 BLT twoBytes threeBytes: @@ -98,8 +96,7 @@ TEXT ·emitCopy(SB), NOSPLIT, $0-48 loop0: // for length >= 68 { etc } - MOVW $68, R2 - CMPW R2, R3 + CMPW $68, R3 BLT step1 // Emit a length 64 copy, encoded as 3 bytes. @@ -112,9 +109,8 @@ loop0: step1: // if length > 64 { etc } - MOVD $64, R2 - CMP R2, R3 - BLE step2 + CMP $64, R3 + BLE step2 // Emit a length 60 copy, encoded as 3 bytes. MOVD $0xee, R2 @@ -125,11 +121,9 @@ step1: step2: // if length >= 12 || offset >= 2048 { goto step3 } - MOVD $12, R2 - CMP R2, R3 + CMP $12, R3 BGE step3 - MOVW $2048, R2 - CMPW R2, R11 + CMPW $2048, R11 BGE step3 // Emit the remaining copy, encoded as 2 bytes. @@ -295,27 +289,24 @@ varTable: // var table [maxTableSize]uint16 // // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each VST1 - // writes 64 bytes, so we can do only tableSize/32 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - // This clear could overrun the first tableSize elements, but it won't - // overrun the allocated stack size. + // first tableSize elements. Each uint16 element is 2 bytes and each + // iterations writes 64 bytes, so we can do only tableSize/32 writes + // instead of the 2048 writes that would zero-initialize all of table's + // 32768 bytes. This clear could overrun the first tableSize elements, but + // it won't overrun the allocated stack size. ADD $128, RSP, R17 MOVD R17, R4 // !!! 
R6 = &src[tableSize] ADD R6<<1, R17, R6 - // zero the SIMD registers - VEOR V0.B16, V0.B16, V0.B16 - VEOR V1.B16, V1.B16, V1.B16 - VEOR V2.B16, V2.B16, V2.B16 - VEOR V3.B16, V3.B16, V3.B16 - memclr: - VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R4) - CMP R4, R6 - BHI memclr + STP.P (ZR, ZR), 64(R4) + STP (ZR, ZR), -48(R4) + STP (ZR, ZR), -32(R4) + STP (ZR, ZR), -16(R4) + CMP R4, R6 + BHI memclr // !!! R6 = &src[0] MOVD R7, R6 @@ -404,8 +395,7 @@ fourByteMatch: // on inputMargin in encode.go. MOVD R7, R3 SUB R10, R3, R3 - MOVD $16, R2 - CMP R2, R3 + CMP $16, R3 BLE emitLiteralFastPath // ---------------------------------------- @@ -454,18 +444,21 @@ inlineEmitLiteralMemmove: MOVD R3, 24(RSP) // Finish the "d +=" part of "d += emitLiteral(etc)". - ADD R3, R8, R8 - MOVD R7, 80(RSP) - MOVD R8, 88(RSP) - MOVD R15, 120(RSP) - CALL runtime·memmove(SB) - MOVD 64(RSP), R5 - MOVD 72(RSP), R6 - MOVD 80(RSP), R7 - MOVD 88(RSP), R8 - MOVD 96(RSP), R9 - MOVD 120(RSP), R15 - B inner1 + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 inlineEmitLiteralEnd: // End inline of the emitLiteral call. @@ -489,9 +482,9 @@ emitLiteralFastPath: // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or // 16-byte loads and stores. This technique probably wouldn't be as // effective on architectures that are fussier about alignment. - VLD1 0(R10), [V0.B16] - VST1 [V0.B16], 0(R8) - ADD R3, R8, R8 + LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 inner1: // for { etc } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 3a05e97eb34b..fd0abe3b4892 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -35,6 +35,8 @@ import ( "github.com/thanos-io/thanos/pkg/runutil" ) +const FetcherConcurrency = 32 + type fetcherMetrics struct { syncs prometheus.Counter syncFailures prometheus.Counter @@ -301,6 +303,7 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) { ch = make(chan ulid.ULID, f.concurrency) mtx sync.Mutex ) + level.Debug(f.logger).Log("msg", "fetching meta data", "concurrency", f.concurrency) for i := 0; i < f.concurrency; i++ { eg.Go(func() error { for id := range ch { diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go index e9b9dc20bdc6..d4b9dee03b67 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go @@ -23,6 +23,11 @@ import ( "github.com/thanos-io/thanos/pkg/objstore" ) +var ( + errNotIdle = errors.New("the reader is not idle") + errUnloadedWhileLoading = errors.New("the index-header has been concurrently unloaded") +) + // LazyBinaryReaderMetrics holds metrics tracked by LazyBinaryReader. type LazyBinaryReaderMetrics struct { loadCount prometheus.Counter @@ -133,7 +138,8 @@ func (r *LazyBinaryReader) Close() error { defer r.onClosed(r) } - return r.unload() + // Unload without checking if idle. + return r.unloadIfIdleSince(0) } // IndexVersion implements Reader. 
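The two sentinel errors introduced for the lazy index-header reader define the contract between the reader and the pool that sweeps it: an unload attempt that finds the reader still in use is expected and must not be reported as a failure. A rough sketch of caller-side handling under that assumption (handleUnload and its callback are illustrative; the pool hunks further down apply the same errors.Is check):

```go
package example

import (
	"errors"
	"fmt"
)

// Sentinel errors matching the names introduced in this diff; everything else
// in this snippet is illustrative.
var (
	errNotIdle              = errors.New("the reader is not idle")
	errUnloadedWhileLoading = errors.New("the index-header has been concurrently unloaded")
)

// handleUnload treats "still in use" as a no-op rather than an error, which is
// how the idle sweeper is meant to consume unloadIfIdleSince.
func handleUnload(unload func() error) error {
	if err := unload(); err != nil {
		if errors.Is(err, errNotIdle) {
			return nil // reader was used again; retry on the next sweep
		}
		return fmt.Errorf("unload index-header: %w", err)
	}
	return nil
}
```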
@@ -203,7 +209,7 @@ func (r *LazyBinaryReader) LabelNames() ([]string, error) { // load ensures the underlying binary index-header reader has been successfully loaded. Returns // an error on failure. This function MUST be called with the read lock already acquired. -func (r *LazyBinaryReader) load() error { +func (r *LazyBinaryReader) load() (returnErr error) { // Nothing to do if we already tried loading it. if r.reader != nil { return nil @@ -216,8 +222,16 @@ func (r *LazyBinaryReader) load() error { // the read lock once done. r.readerMx.RUnlock() r.readerMx.Lock() - defer r.readerMx.RLock() - defer r.readerMx.Unlock() + defer func() { + r.readerMx.Unlock() + r.readerMx.RLock() + + // Between the write unlock and the subsequent read lock, the unload() may have run, + // so we make sure to catch this edge case. + if returnErr == nil && r.reader == nil { + returnErr = errUnloadedWhileLoading + } + }() // Ensure none else tried to load it in the meanwhile. if r.reader != nil { @@ -245,19 +259,22 @@ func (r *LazyBinaryReader) load() error { return nil } -// unload closes underlying BinaryReader. Calling this function on a already unloaded reader is a no-op. -func (r *LazyBinaryReader) unload() error { - // Always update the used timestamp so that the pool will not call unload() again until the next - // idle timeout is hit. - r.usedAt.Store(time.Now().UnixNano()) - +// unloadIfIdleSince closes underlying BinaryReader if the reader is idle since given time (as unix nano). If idleSince is 0, +// the check on the last usage is skipped. Calling this function on a already unloaded reader is a no-op. +func (r *LazyBinaryReader) unloadIfIdleSince(ts int64) error { r.readerMx.Lock() defer r.readerMx.Unlock() + // Nothing to do if already unloaded. if r.reader == nil { return nil } + // Do not unloadIfIdleSince if not idle. + if ts > 0 && r.usedAt.Load() > ts { + return errNotIdle + } + r.metrics.unloadCount.Inc() if err := r.reader.Close(); err != nil { r.metrics.unloadFailedCount.Inc() @@ -268,6 +285,16 @@ func (r *LazyBinaryReader) unload() error { return nil } -func (r *LazyBinaryReader) lastUsedAt() int64 { - return r.usedAt.Load() +// isIdleSince returns true if the reader is idle since given time (as unix nano). +func (r *LazyBinaryReader) isIdleSince(ts int64) bool { + if r.usedAt.Load() > ts { + return false + } + + // A reader can be considered idle only if it's loaded. + r.readerMx.RLock() + loaded := r.reader != nil + r.readerMx.RUnlock() + + return loaded } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go index 660ae4853a39..93f1fd88b371 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -11,6 +11,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/oklog/ulid" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/thanos/pkg/objstore" @@ -98,29 +99,22 @@ func (p *ReaderPool) Close() { } func (p *ReaderPool) closeIdleReaders() { - for _, r := range p.getIdleReaders() { - // Closing an already closed reader is a no-op, so we close it and just update - // the last timestamp on success. If it will be still be idle the next time this - // function is called, we'll try to close it again and will just be a no-op. 
- // - // Due to concurrency, the current implementation may close a reader which was - // use between when the list of idle readers has been computed and now. This is - // an edge case we're willing to accept, to not further complicate the logic. - if err := r.unload(); err != nil { + idleTimeoutAgo := time.Now().Add(-p.lazyReaderIdleTimeout).UnixNano() + + for _, r := range p.getIdleReadersSince(idleTimeoutAgo) { + if err := r.unloadIfIdleSince(idleTimeoutAgo); err != nil && !errors.Is(err, errNotIdle) { level.Warn(p.logger).Log("msg", "failed to close idle index-header reader", "err", err) } } } -func (p *ReaderPool) getIdleReaders() []*LazyBinaryReader { +func (p *ReaderPool) getIdleReadersSince(ts int64) []*LazyBinaryReader { p.lazyReadersMx.Lock() defer p.lazyReadersMx.Unlock() var idle []*LazyBinaryReader - threshold := time.Now().Add(-p.lazyReaderIdleTimeout).UnixNano() - for r := range p.lazyReaders { - if r.lastUsedAt() < threshold { + if r.isIdleSince(ts) { idle = append(idle, r) } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 1046057039c2..e27478deda86 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -450,7 +450,7 @@ func (cg *Group) Resolution() int64 { // Planner returns blocks to compact. type Planner interface { - // Plan returns a block directories of blocks that should be compacted into single one. + // Plan returns a list of blocks that should be compacted into single one. // The blocks can be overlapping. The provided metadata has to be ordered by minTime. Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go similarity index 100% rename from vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go rename to vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go index eb679679805b..edbe49d42494 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go @@ -51,6 +51,11 @@ var DefaultConfig = Config{ HTTPConfig: HTTPConfig{ IdleConnTimeout: model.Duration(90 * time.Second), ResponseHeaderTimeout: model.Duration(2 * time.Minute), + TLSHandshakeTimeout: model.Duration(10 * time.Second), + ExpectContinueTimeout: model.Duration(1 * time.Second), + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + MaxConnsPerHost: 0, }, // Minimum file size after which an HTTP multipart request should be used to upload objects to storage. // Set to 128 MiB as in the minio client. 
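Note on the indexheader hunks above: the pool now computes the idle cut-off once, passes the same timestamp to both getIdleReadersSince and unloadIfIdleSince, and the reader re-checks usedAt under its write lock, returning errNotIdle if it was touched between the scan and the close (Close passes 0 to skip the check entirely). A rough sketch of that check-then-act guard follows, assuming a simplified reader type — lazyReader is not the real LazyBinaryReader, and the standard-library atomic.Int64 stands in for the vendored atomic package.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

var errNotIdle = errors.New("the reader is not idle")

// lazyReader is a simplified stand-in for LazyBinaryReader: it keeps only the
// pieces needed to show the idle check.
type lazyReader struct {
	mx     sync.RWMutex
	loaded bool         // stands in for r.reader != nil
	usedAt atomic.Int64 // unix nanos of the last use
}

// isIdleSince reports whether the reader has not been used since ts and is
// currently loaded (an unloaded reader has nothing to release).
func (r *lazyReader) isIdleSince(ts int64) bool {
	if r.usedAt.Load() > ts {
		return false
	}
	r.mx.RLock()
	loaded := r.loaded
	r.mx.RUnlock()
	return loaded
}

// unloadIfIdleSince releases the reader only if it is still idle since ts.
// ts == 0 skips the idle check (the Close path).
func (r *lazyReader) unloadIfIdleSince(ts int64) error {
	r.mx.Lock()
	defer r.mx.Unlock()

	if !r.loaded {
		return nil // already unloaded, no-op
	}
	// Re-check under the write lock: the reader may have been used between
	// the idle scan and this call.
	if ts > 0 && r.usedAt.Load() > ts {
		return errNotIdle
	}
	r.loaded = false
	return nil
}

func main() {
	r := &lazyReader{loaded: true}
	r.usedAt.Store(time.Now().Add(-time.Hour).UnixNano())

	cutoff := time.Now().Add(-30 * time.Minute).UnixNano()
	if r.isIdleSince(cutoff) {
		// Simulate a use sneaking in between the scan and the unload.
		r.usedAt.Store(time.Now().UnixNano())
		fmt.Println(r.unloadIfIdleSince(cutoff)) // prints "the reader is not idle"
	}
}
```

The second check under the write lock is what closes the race that the removed comment in reader_pool.go explicitly accepted.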
@@ -94,6 +99,12 @@ type HTTPConfig struct { ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"` InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"` + ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"` + MaxIdleConns int `yaml:"max_idle_conns"` + MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"` + MaxConnsPerHost int `yaml:"max_conns_per_host"` + // Allow upstream callers to inject a round tripper Transport http.RoundTripper `yaml:"-"` } @@ -111,11 +122,12 @@ func DefaultTransport(config Config) *http.Transport { DualStack: true, }).DialContext, - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, + MaxIdleConns: config.HTTPConfig.MaxIdleConns, + MaxIdleConnsPerHost: config.HTTPConfig.MaxIdleConnsPerHost, IdleConnTimeout: time.Duration(config.HTTPConfig.IdleConnTimeout), - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, + MaxConnsPerHost: config.HTTPConfig.MaxConnsPerHost, + TLSHandshakeTimeout: time.Duration(config.HTTPConfig.TLSHandshakeTimeout), + ExpectContinueTimeout: time.Duration(config.HTTPConfig.ExpectContinueTimeout), // A custom ResponseHeaderTimeout was introduced // to cover cases where the tcp connection works but // the server never answers. Defaults to 2 minutes. diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go index 9372382923e0..fbc832ed7bc9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go @@ -39,9 +39,6 @@ var DefaultConfig = Config{ Timeout: model.Duration(5 * time.Minute), } -// TODO(FUSAKLA): Added to avoid breaking dependency of Cortex which uses the original struct name SwiftConfig. -type SwiftConfig = Config - type Config struct { AuthVersion int `yaml:"auth_version"` AuthUrl string `yaml:"auth_url"` diff --git a/vendor/modules.txt b/vendor/modules.txt index 34534e5f15cc..58dbe93607bc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -164,7 +164,7 @@ github.com/coreos/go-systemd/journal github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog -# github.com/cortexproject/cortex v1.6.1-0.20210129172402-0976147451ee +# github.com/cortexproject/cortex v1.6.1-0.20210204145131-7dac81171c66 ## explicit github.com/cortexproject/cortex/pkg/alertmanager github.com/cortexproject/cortex/pkg/alertmanager/alerts @@ -455,7 +455,7 @@ github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/golang/snappy v0.0.2 +# github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 ## explicit github.com/golang/snappy # github.com/google/btree v1.0.0 @@ -856,7 +856,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51 +# github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader github.com/thanos-io/thanos/pkg/block/metadata
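Note on the objstore/s3 hunks above: the previously hard-coded transport values (TLS handshake and expect-continue timeouts, idle-connection limits) become HTTPConfig fields with matching defaults, DefaultTransport now reads them from the config, and a new MaxConnsPerHost knob defaults to 0 (unlimited). A small sketch of that mapping onto net/http is below — the field and YAML names mirror the hunk, but this HTTPConfig is a trimmed stand-in using time.Duration rather than the Prometheus model.Duration, not the full Thanos type.

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// HTTPConfig is a trimmed copy of the options added in the hunk above,
// using time.Duration directly instead of model.Duration.
type HTTPConfig struct {
	IdleConnTimeout       time.Duration `yaml:"idle_conn_timeout"`
	ResponseHeaderTimeout time.Duration `yaml:"response_header_timeout"`
	TLSHandshakeTimeout   time.Duration `yaml:"tls_handshake_timeout"`
	ExpectContinueTimeout time.Duration `yaml:"expect_continue_timeout"`
	MaxIdleConns          int           `yaml:"max_idle_conns"`
	MaxIdleConnsPerHost   int           `yaml:"max_idle_conns_per_host"`
	MaxConnsPerHost       int           `yaml:"max_conns_per_host"`
}

// newTransport wires the config into an *http.Transport, filling the same
// fields that DefaultTransport now takes from config.HTTPConfig.
func newTransport(cfg HTTPConfig) *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		MaxIdleConns:          cfg.MaxIdleConns,
		MaxIdleConnsPerHost:   cfg.MaxIdleConnsPerHost,
		MaxConnsPerHost:       cfg.MaxConnsPerHost, // 0 means no per-host limit
		IdleConnTimeout:       cfg.IdleConnTimeout,
		TLSHandshakeTimeout:   cfg.TLSHandshakeTimeout,
		ExpectContinueTimeout: cfg.ExpectContinueTimeout,
		ResponseHeaderTimeout: cfg.ResponseHeaderTimeout,
	}
}

func main() {
	// Values matching the new defaults in DefaultConfig.
	cfg := HTTPConfig{
		IdleConnTimeout:       90 * time.Second,
		ResponseHeaderTimeout: 2 * time.Minute,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		MaxIdleConns:          100,
		MaxIdleConnsPerHost:   100,
		MaxConnsPerHost:       0,
	}
	client := &http.Client{Transport: newTransport(cfg)}
	fmt.Printf("%T ready\n", client.Transport)
}
```

Keeping MaxConnsPerHost at 0 preserves the previous unlimited behaviour, while exposing it lets deployments cap per-host connections when the S3 endpoint throttles aggressively.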