From 337db2fedfab25d4c1e9a26d292df496966c92de Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Wed, 16 Sep 2020 17:47:11 +0530 Subject: [PATCH] revendor cortex to latest master (#2627) --- docs/sources/configuration/query-frontend.md | 4 +- docs/sources/operations/upgrade.md | 5 + go.mod | 6 +- go.sum | 46 +- pkg/loki/modules.go | 2 +- pkg/querier/querier_mock_test.go | 4 + pkg/querier/queryrange/roundtrip_test.go | 1 - pkg/storage/util_test.go | 3 + production/ksonnet/loki/config.libsonnet | 2 +- .../cortexproject/cortex/pkg/api/api.go | 26 +- .../cortexproject/cortex/pkg/api/handlers.go | 91 +- .../cortexproject/cortex/pkg/api/queryable.go | 3 +- .../cortex/pkg/chunk/cache/cache.go | 2 +- .../cortex/pkg/chunk/cache/redis_cache.go | 130 +- .../cortex/pkg/chunk/cache/redis_client.go | 137 + .../pkg/chunk/cassandra/storage_client.go | 18 +- .../cortex/pkg/chunk/chunk_store.go | 4 + .../cortex/pkg/chunk/composite_store.go | 23 +- .../pkg/chunk/gcp/bigtable_index_client.go | 6 +- .../pkg/chunk/inmemory_storage_client.go | 52 + .../cortex/pkg/chunk/objectclient/client.go | 6 +- .../cortex/pkg/chunk/purger/purger.go | 150 +- .../cortex/pkg/chunk/schema_config.go | 15 +- .../cortex/pkg/chunk/storage/factory.go | 2 +- .../cortex/pkg/chunk/testutils/testutils.go | 12 +- .../cortex/pkg/configs/api/api.go | 3 +- .../cortexproject/cortex/pkg/cortex/cortex.go | 13 +- .../cortex/pkg/cortex/modules.go | 79 +- .../cortex/pkg/cortex/tracing.go | 33 + .../cortex/pkg/distributor/distributor.go | 26 +- .../cortex/pkg/flusher/flusher.go | 6 +- .../cortex/pkg/ingester/ingester.go | 17 +- .../cortex/pkg/ingester/ingester_v2.go | 2 - .../pkg/querier/blocks_store_queryable.go | 2 - .../cortex/pkg/querier/querier.go | 32 +- .../pkg/querier/queryrange/results_cache.go | 9 +- .../pkg/ring/kv/memberlist/kv_init_service.go | 10 +- .../ring/kv/memberlist/memberlist_client.go | 75 +- .../pkg/ring/kv/memberlist/tcp_transport.go | 30 +- .../cortex/pkg/ring/lifecycler.go | 2 +- .../cortexproject/cortex/pkg/ruler/api.go | 22 + .../cortexproject/cortex/pkg/ruler/compat.go | 18 +- .../cortexproject/cortex/pkg/ruler/ruler.go | 6 +- .../cortex/pkg/ruler/rules/compat.go | 17 +- .../cortex/pkg/ruler/rules/local/local.go | 5 + .../ruler/rules/objectclient/rule_store.go | 32 +- .../cortex/pkg/ruler/rules/store.go | 6 + .../pkg/storage/backend/azure/config.go | 2 +- .../pkg/storage/backend/filesystem/config.go | 2 +- .../cortex/pkg/storage/backend/gcs/config.go | 2 +- .../cortex/pkg/storage/backend/s3/config.go | 2 +- .../cortex/pkg/storage/tsdb/config.go | 54 +- .../cortex/pkg/storage/tsdb/index_cache.go | 2 +- .../cortex/pkg/storegateway/gateway.go | 4 +- .../cortex/pkg/storegateway/gateway_ring.go | 18 +- .../cortex/pkg/util/modules/modules.go | 15 + .../cortex/pkg/util/services/manager.go | 13 +- .../cortex/pkg/util/validation/limits.go | 19 +- .../github.com/dgryski/go-rendezvous/LICENSE | 21 + .../github.com/dgryski/go-rendezvous/rdv.go | 79 + vendor/github.com/felixge/fgprof/LICENSE.txt | 8 + vendor/github.com/felixge/fgprof/README.md | 214 ++ vendor/github.com/felixge/fgprof/fgprof.go | 97 + vendor/github.com/felixge/fgprof/format.go | 102 + vendor/github.com/felixge/fgprof/go.mod | 5 + vendor/github.com/felixge/fgprof/go.sum | 7 + vendor/github.com/felixge/fgprof/handler.go | 32 + vendor/github.com/felixge/fgprof/pprof.go | 56 + .../fsnotify/fsnotify/.editorconfig | 9 +- .../fsnotify/fsnotify/.gitattributes | 1 + .../github.com/fsnotify/fsnotify/.travis.yml | 20 +- vendor/github.com/fsnotify/fsnotify/LICENSE | 2 +- 
vendor/github.com/fsnotify/fsnotify/README.md | 71 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 4 +- vendor/github.com/fsnotify/fsnotify/go.mod | 5 + vendor/github.com/fsnotify/fsnotify/go.sum | 2 + .../fsnotify/fsnotify/inotify_poller.go | 4 +- .../fsnotify/fsnotify/open_mode_bsd.go | 2 +- .../fsnotify/fsnotify/open_mode_darwin.go | 2 +- .../github.com/go-redis/redis/v8/.gitignore | 3 + .../go-redis/redis/v8/.golangci.yml | 21 + .../github.com/go-redis/redis/v8/.prettierrc | 4 + .../github.com/go-redis/redis/v8/.travis.yml | 20 + .../github.com/go-redis/redis/v8/CHANGELOG.md | 85 + vendor/github.com/go-redis/redis/v8/LICENSE | 25 + vendor/github.com/go-redis/redis/v8/Makefile | 20 + vendor/github.com/go-redis/redis/v8/README.md | 150 + .../github.com/go-redis/redis/v8/cluster.go | 1694 +++++++++++ .../go-redis/redis/v8/cluster_commands.go | 25 + .../github.com/go-redis/redis/v8/command.go | 2169 +++++++++++++ .../github.com/go-redis/redis/v8/commands.go | 2688 +++++++++++++++++ vendor/github.com/go-redis/redis/v8/doc.go | 4 + vendor/github.com/go-redis/redis/v8/error.go | 119 + vendor/github.com/go-redis/redis/v8/go.mod | 16 + vendor/github.com/go-redis/redis/v8/go.sum | 113 + .../go-redis/redis/v8/internal/arg.go | 147 + .../redis/v8/internal/hashtag/hashtag.go | 78 + .../go-redis/redis/v8/internal/instruments.go | 33 + .../go-redis/redis/v8/internal/internal.go | 25 + .../go-redis/redis/v8/internal/log.go | 24 + .../go-redis/redis/v8/internal/once.go | 60 + .../go-redis/redis/v8/internal/pool/conn.go | 132 + .../go-redis/redis/v8/internal/pool/pool.go | 524 ++++ .../redis/v8/internal/pool/pool_single.go | 58 + .../redis/v8/internal/pool/pool_sticky.go | 202 ++ .../redis/v8/internal/proto/reader.go | 314 ++ .../go-redis/redis/v8/internal/proto/scan.go | 166 + .../redis/v8/internal/proto/writer.go | 153 + .../go-redis/redis/v8/internal/safe.go | 11 + .../go-redis/redis/v8/internal/unsafe.go | 20 + .../go-redis/redis/v8/internal/util.go | 81 + .../go-redis/redis/v8/internal/util/safe.go | 11 + .../redis/v8/internal/util/strconv.go | 19 + .../go-redis/redis/v8/internal/util/unsafe.go | 22 + .../github.com/go-redis/redis/v8/iterator.go | 77 + .../github.com/go-redis/redis/v8/options.go | 270 ++ .../github.com/go-redis/redis/v8/pipeline.go | 137 + vendor/github.com/go-redis/redis/v8/pubsub.go | 618 ++++ vendor/github.com/go-redis/redis/v8/redis.go | 764 +++++ vendor/github.com/go-redis/redis/v8/result.go | 180 ++ vendor/github.com/go-redis/redis/v8/ring.go | 729 +++++ vendor/github.com/go-redis/redis/v8/script.go | 65 + .../github.com/go-redis/redis/v8/sentinel.go | 639 ++++ vendor/github.com/go-redis/redis/v8/tx.go | 151 + .../github.com/go-redis/redis/v8/universal.go | 205 ++ .../gomodule/redigo/internal/commandinfo.go | 54 - .../github.com/gomodule/redigo/redis/conn.go | 673 ----- .../github.com/gomodule/redigo/redis/doc.go | 177 -- .../github.com/gomodule/redigo/redis/go16.go | 27 - .../github.com/gomodule/redigo/redis/go17.go | 29 - .../github.com/gomodule/redigo/redis/go18.go | 9 - .../github.com/gomodule/redigo/redis/log.go | 134 - .../github.com/gomodule/redigo/redis/pool.go | 562 ---- .../gomodule/redigo/redis/pool17.go | 35 - .../gomodule/redigo/redis/pubsub.go | 148 - .../github.com/gomodule/redigo/redis/redis.go | 117 - .../github.com/gomodule/redigo/redis/reply.go | 479 --- .../github.com/gomodule/redigo/redis/scan.go | 585 ---- .../gomodule/redigo/redis/script.go | 91 - .../github.com/google/go-cmp/cmp/compare.go | 79 +- .../google/go-cmp/cmp/export_panic.go | 2 +- 
.../google/go-cmp/cmp/export_unsafe.go | 20 +- .../google/go-cmp/cmp/internal/diff/diff.go | 22 +- .../google/go-cmp/cmp/internal/value/name.go | 157 + .../cmp/internal/value/pointer_purego.go | 10 + .../cmp/internal/value/pointer_unsafe.go | 10 + vendor/github.com/google/go-cmp/cmp/path.go | 7 +- vendor/github.com/google/go-cmp/cmp/report.go | 5 +- .../google/go-cmp/cmp/report_compare.go | 200 +- .../google/go-cmp/cmp/report_references.go | 264 ++ .../google/go-cmp/cmp/report_reflect.go | 292 +- .../google/go-cmp/cmp/report_slices.go | 135 +- .../google/go-cmp/cmp/report_text.go | 86 +- vendor/github.com/google/pprof/AUTHORS | 7 + vendor/github.com/google/pprof/CONTRIBUTORS | 16 + vendor/github.com/google/pprof/LICENSE | 202 ++ .../github.com/google/pprof/profile/encode.go | 567 ++++ .../github.com/google/pprof/profile/filter.go | 270 ++ .../github.com/google/pprof/profile/index.go | 64 + .../pprof/profile/legacy_java_profile.go | 315 ++ .../google/pprof/profile/legacy_profile.go | 1225 ++++++++ .../github.com/google/pprof/profile/merge.go | 479 +++ .../google/pprof/profile/profile.go | 793 +++++ .../github.com/google/pprof/profile/proto.go | 370 +++ .../github.com/google/pprof/profile/prune.go | 178 ++ vendor/github.com/stretchr/testify/LICENSE | 2 +- ...ssertion_order.go => assertion_compare.go} | 173 +- .../testify/assert/assertion_format.go | 40 +- .../testify/assert/assertion_forward.go | 62 +- .../stretchr/testify/assert/assertions.go | 191 +- .../testify/assert/http_assertions.go | 25 +- .../github.com/stretchr/testify/mock/mock.go | 68 +- .../stretchr/testify/require/require.go | 74 +- .../testify/require/require_forward.go | 62 +- .../weaveworks/common/aws/config.go | 8 +- .../weaveworks/common/httpgrpc/README.md | 2 +- .../weaveworks/common/server/server.go | 28 +- vendor/go.opentelemetry.io/otel/.gitignore | 22 + vendor/go.opentelemetry.io/otel/.gitmodules | 3 + vendor/go.opentelemetry.io/otel/.golangci.yml | 32 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 803 +++++ vendor/go.opentelemetry.io/otel/CODEOWNERS | 17 + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 361 +++ .../otel}/LICENSE | 28 +- vendor/go.opentelemetry.io/otel/Makefile | 157 + .../go.opentelemetry.io/otel/Makefile.proto | 72 + vendor/go.opentelemetry.io/otel/README.md | 80 + vendor/go.opentelemetry.io/otel/RELEASING.md | 81 + .../otel/api/correlation/context.go | 172 ++ .../correlation_context_propagator.go | 118 + .../otel/api/correlation/doc.go | 19 + .../otel/api/correlation/map.go | 176 ++ .../otel/api/global/handler.go | 91 + .../otel/api/global/internal/meter.go | 347 +++ .../otel/api/global/internal/state.go | 140 + .../otel/api/global/internal/trace.go | 124 + .../otel/api/global/metric.go | 49 + .../otel/api/global/propagation.go | 32 + .../otel/api/global/trace.go | 44 + .../otel/api/metric/async.go | 217 ++ .../otel/api/metric/config.go | 125 + .../otel/api/metric/counter.go | 95 + .../otel/api/metric/descriptor.go | 77 + .../otel/api/metric/doc.go | 49 + .../otel/api/metric/kind.go | 79 + .../otel/api/metric/kind_string.go | 28 + .../otel/api/metric/meter.go | 320 ++ .../otel/api/metric/must.go | 222 ++ .../otel/api/metric/noop.go | 58 + .../otel/api/metric/number.go | 540 ++++ .../otel/api/metric/numberkind_string.go | 24 + .../otel/api/metric/observer.go | 124 + .../otel/api/metric/registry/registry.go | 170 ++ .../otel/api/metric/sdkapi.go | 94 + .../otel/api/metric/sync.go | 192 ++ .../otel/api/metric/updowncounter.go | 96 + .../otel/api/metric/valuerecorder.go | 97 + 
.../otel/api/propagation/doc.go | 16 + .../otel/api/propagation/propagation.go | 143 + .../go.opentelemetry.io/otel/api/trace/api.go | 273 ++ .../otel/api/trace/b3_propagator.go | 343 +++ .../otel/api/trace/context.go | 55 + .../go.opentelemetry.io/otel/api/trace/doc.go | 15 + .../otel/api/trace/noop_span.go | 79 + .../otel/api/trace/noop_trace.go | 29 + .../otel/api/trace/noop_trace_provider.go | 24 + .../otel/api/trace/span_context.go | 197 ++ .../api/trace/trace_context_propagator.go | 152 + .../go.opentelemetry.io/otel/api/unit/doc.go | 15 + .../go.opentelemetry.io/otel/api/unit/unit.go | 23 + .../go.opentelemetry.io/otel/codes/codes.go | 89 + vendor/go.opentelemetry.io/otel/doc.go | 15 + .../go.opentelemetry.io/otel/error_handler.go | 22 + .../go.opentelemetry.io/otel/get_main_pkgs.sh | 36 + vendor/go.opentelemetry.io/otel/go.mod | 8 + vendor/go.opentelemetry.io/otel/go.sum | 15 + .../otel/internal/rawhelpers.go | 91 + vendor/go.opentelemetry.io/otel/label/doc.go | 16 + .../go.opentelemetry.io/otel/label/encoder.go | 150 + .../otel/label/iterator.go | 143 + vendor/go.opentelemetry.io/otel/label/key.go | 169 ++ vendor/go.opentelemetry.io/otel/label/kv.go | 144 + vendor/go.opentelemetry.io/otel/label/set.go | 468 +++ .../otel/label/type_string.go | 32 + .../go.opentelemetry.io/otel/label/value.go | 284 ++ vendor/go.opentelemetry.io/otel/otel.go | 27 + .../go.opentelemetry.io/otel/pre_release.sh | 95 + vendor/go.opentelemetry.io/otel/tag.sh | 178 ++ .../otel/verify_examples.sh | 85 + vendor/golang.org/x/exp/AUTHORS | 3 + vendor/golang.org/x/exp/CONTRIBUTORS | 3 + vendor/golang.org/x/exp/LICENSE | 27 + vendor/golang.org/x/exp/PATENTS | 22 + vendor/golang.org/x/exp/rand/exp.go | 222 ++ vendor/golang.org/x/exp/rand/normal.go | 157 + vendor/golang.org/x/exp/rand/rand.go | 371 +++ vendor/golang.org/x/exp/rand/rng.go | 91 + vendor/golang.org/x/exp/rand/zipf.go | 77 + vendor/modules.txt | 41 +- 259 files changed, 30816 insertions(+), 3976 deletions(-) create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_client.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/cortex/tracing.go create mode 100644 vendor/github.com/dgryski/go-rendezvous/LICENSE create mode 100644 vendor/github.com/dgryski/go-rendezvous/rdv.go create mode 100644 vendor/github.com/felixge/fgprof/LICENSE.txt create mode 100644 vendor/github.com/felixge/fgprof/README.md create mode 100644 vendor/github.com/felixge/fgprof/fgprof.go create mode 100644 vendor/github.com/felixge/fgprof/format.go create mode 100644 vendor/github.com/felixge/fgprof/go.mod create mode 100644 vendor/github.com/felixge/fgprof/go.sum create mode 100644 vendor/github.com/felixge/fgprof/handler.go create mode 100644 vendor/github.com/felixge/fgprof/pprof.go create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/go.mod create mode 100644 vendor/github.com/fsnotify/fsnotify/go.sum create mode 100644 vendor/github.com/go-redis/redis/v8/.gitignore create mode 100644 vendor/github.com/go-redis/redis/v8/.golangci.yml create mode 100644 vendor/github.com/go-redis/redis/v8/.prettierrc create mode 100644 vendor/github.com/go-redis/redis/v8/.travis.yml create mode 100644 vendor/github.com/go-redis/redis/v8/CHANGELOG.md create mode 100644 vendor/github.com/go-redis/redis/v8/LICENSE create mode 100644 vendor/github.com/go-redis/redis/v8/Makefile create mode 100644 vendor/github.com/go-redis/redis/v8/README.md create mode 100644 
vendor/github.com/go-redis/redis/v8/cluster.go create mode 100644 vendor/github.com/go-redis/redis/v8/cluster_commands.go create mode 100644 vendor/github.com/go-redis/redis/v8/command.go create mode 100644 vendor/github.com/go-redis/redis/v8/commands.go create mode 100644 vendor/github.com/go-redis/redis/v8/doc.go create mode 100644 vendor/github.com/go-redis/redis/v8/error.go create mode 100644 vendor/github.com/go-redis/redis/v8/go.mod create mode 100644 vendor/github.com/go-redis/redis/v8/go.sum create mode 100644 vendor/github.com/go-redis/redis/v8/internal/arg.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/instruments.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/internal.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/log.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/once.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/pool/conn.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/pool/pool.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/proto/reader.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/proto/scan.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/proto/writer.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/safe.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/unsafe.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/util.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/util/safe.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/util/strconv.go create mode 100644 vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go create mode 100644 vendor/github.com/go-redis/redis/v8/iterator.go create mode 100644 vendor/github.com/go-redis/redis/v8/options.go create mode 100644 vendor/github.com/go-redis/redis/v8/pipeline.go create mode 100644 vendor/github.com/go-redis/redis/v8/pubsub.go create mode 100644 vendor/github.com/go-redis/redis/v8/redis.go create mode 100644 vendor/github.com/go-redis/redis/v8/result.go create mode 100644 vendor/github.com/go-redis/redis/v8/ring.go create mode 100644 vendor/github.com/go-redis/redis/v8/script.go create mode 100644 vendor/github.com/go-redis/redis/v8/sentinel.go create mode 100644 vendor/github.com/go-redis/redis/v8/tx.go create mode 100644 vendor/github.com/go-redis/redis/v8/universal.go delete mode 100644 vendor/github.com/gomodule/redigo/internal/commandinfo.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/conn.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/doc.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/go16.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/go17.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/go18.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/log.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/pool.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/pool17.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/pubsub.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/redis.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/reply.go delete mode 100644 
vendor/github.com/gomodule/redigo/redis/scan.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/script.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go create mode 100644 vendor/github.com/google/pprof/AUTHORS create mode 100644 vendor/github.com/google/pprof/CONTRIBUTORS create mode 100644 vendor/github.com/google/pprof/LICENSE create mode 100644 vendor/github.com/google/pprof/profile/encode.go create mode 100644 vendor/github.com/google/pprof/profile/filter.go create mode 100644 vendor/github.com/google/pprof/profile/index.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_java_profile.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_profile.go create mode 100644 vendor/github.com/google/pprof/profile/merge.go create mode 100644 vendor/github.com/google/pprof/profile/profile.go create mode 100644 vendor/github.com/google/pprof/profile/proto.go create mode 100644 vendor/github.com/google/pprof/profile/prune.go rename vendor/github.com/stretchr/testify/assert/{assertion_order.go => assertion_compare.go} (62%) create mode 100644 vendor/go.opentelemetry.io/otel/.gitignore create mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules create mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml create mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md create mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS create mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md rename vendor/{github.com/gomodule/redigo => go.opentelemetry.io/otel}/LICENSE (89%) create mode 100644 vendor/go.opentelemetry.io/otel/Makefile create mode 100644 vendor/go.opentelemetry.io/otel/Makefile.proto create mode 100644 vendor/go.opentelemetry.io/otel/README.md create mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md create mode 100644 vendor/go.opentelemetry.io/otel/api/correlation/context.go create mode 100644 vendor/go.opentelemetry.io/otel/api/correlation/correlation_context_propagator.go create mode 100644 vendor/go.opentelemetry.io/otel/api/correlation/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/api/correlation/map.go create mode 100644 vendor/go.opentelemetry.io/otel/api/global/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/api/global/internal/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/api/global/internal/state.go create mode 100644 vendor/go.opentelemetry.io/otel/api/global/internal/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/api/global/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/api/global/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/api/global/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/async.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/config.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/counter.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/descriptor.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/kind.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/kind_string.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/must.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/noop.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/number.go 
create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/observer.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/sync.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go create mode 100644 vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go create mode 100644 vendor/go.opentelemetry.io/otel/api/propagation/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/api/propagation/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/api.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/b3_propagator.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/context.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/noop_span.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/span_context.go create mode 100644 vendor/go.opentelemetry.io/otel/api/trace/trace_context_propagator.go create mode 100644 vendor/go.opentelemetry.io/otel/api/unit/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/api/unit/unit.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/codes.go create mode 100644 vendor/go.opentelemetry.io/otel/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go create mode 100644 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh create mode 100644 vendor/go.opentelemetry.io/otel/go.mod create mode 100644 vendor/go.opentelemetry.io/otel/go.sum create mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/label/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/label/encoder.go create mode 100644 vendor/go.opentelemetry.io/otel/label/iterator.go create mode 100644 vendor/go.opentelemetry.io/otel/label/key.go create mode 100644 vendor/go.opentelemetry.io/otel/label/kv.go create mode 100644 vendor/go.opentelemetry.io/otel/label/set.go create mode 100644 vendor/go.opentelemetry.io/otel/label/type_string.go create mode 100644 vendor/go.opentelemetry.io/otel/label/value.go create mode 100644 vendor/go.opentelemetry.io/otel/otel.go create mode 100644 vendor/go.opentelemetry.io/otel/pre_release.sh create mode 100644 vendor/go.opentelemetry.io/otel/tag.sh create mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh create mode 100644 vendor/golang.org/x/exp/AUTHORS create mode 100644 vendor/golang.org/x/exp/CONTRIBUTORS create mode 100644 vendor/golang.org/x/exp/LICENSE create mode 100644 vendor/golang.org/x/exp/PATENTS create mode 100644 vendor/golang.org/x/exp/rand/exp.go create mode 100644 vendor/golang.org/x/exp/rand/normal.go create mode 100644 vendor/golang.org/x/exp/rand/rand.go create mode 100644 vendor/golang.org/x/exp/rand/rng.go create mode 100644 vendor/golang.org/x/exp/rand/zipf.go diff --git a/docs/sources/configuration/query-frontend.md b/docs/sources/configuration/query-frontend.md index aaffa009bf74..2ba62fe45488 100644 --- a/docs/sources/configuration/query-frontend.md +++ b/docs/sources/configuration/query-frontend.md @@ -48,7 +48,6 @@ data: cache_results: true results_cache: - 
      max_freshness: 10m
       cache:
         # We're going to use the in-process "FIFO" cache
         enable_fifocache: true
@@ -56,6 +55,9 @@ data:
           size: 1024
           validity: 24h
 
+    limits_config:
+      max_cache_freshness_per_query: '10m'
+
     frontend:
       log_queries_longer_than: 5s
       downstream_url: querier..svc.cluster.local:3100
diff --git a/docs/sources/operations/upgrade.md b/docs/sources/operations/upgrade.md
index 8657462f7d02..81fe87f5acd3 100644
--- a/docs/sources/operations/upgrade.md
+++ b/docs/sources/operations/upgrade.md
@@ -12,6 +12,11 @@ On this page we will document any upgrade issues/gotchas/considerations we are a
 
 ## Master / Unreleased
 
+### IMPORTANT: `results_cache.max_freshness` removed from YAML config
+
+The `max_freshness` setting has been removed from `results_cache` in favour of `max_cache_freshness_per_query` in `limits_config`, which has the same effect.
+If you have `results_cache.max_freshness` set, use the `limits_config.max_cache_freshness_per_query` YAML setting instead.
+
 ## 1.6.0
 
 ### IMPORTANT: Ksonnet Port Change and Removal of NET_BIND_SERVICE Capability from docker image
diff --git a/go.mod b/go.mod
index 55f57a13ccfc..a2f443840179 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.1.1
 	github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
-	github.com/cortexproject/cortex v1.3.1-0.20200901164804-97d13c1ef16f
+	github.com/cortexproject/cortex v1.3.1-0.20200915110508-cbaf36e4fa61
 	github.com/davecgh/go-spew v1.1.1
 	github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible
 	github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
@@ -49,11 +49,11 @@ require (
 	github.com/segmentio/fasthash v1.0.2
 	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
 	github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c
-	github.com/stretchr/testify v1.5.1
+	github.com/stretchr/testify v1.6.1
 	github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448
 	github.com/uber/jaeger-client-go v2.25.0+incompatible
 	github.com/ugorji/go v1.1.7 // indirect
-	github.com/weaveworks/common v0.0.0-20200820123129-280614068c5e
+	github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099
 	go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50
 	go.uber.org/atomic v1.6.0
 	golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de
diff --git a/go.sum b/go.sum
index dcc3e88c17c0..5a827b286475 100644
--- a/go.sum
+++ b/go.sum
@@ -127,6 +127,10 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
+github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
 github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible h1:EaK5256H3ELiyaq5O/Zwd6fnghD6DqmZDQmmzzJklUU=
github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= @@ -235,8 +239,8 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbp github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:PVPxNLrxKH+yc8asaJOxuz7TiRmMizFfnSMOnRzM6oM= -github.com/cortexproject/cortex v1.3.1-0.20200901164804-97d13c1ef16f h1:g+MHBeXc4V6JRVzAYzvZUJFfxsCXHmuRpz5M10mvl30= -github.com/cortexproject/cortex v1.3.1-0.20200901164804-97d13c1ef16f/go.mod h1:ub8BpRZrRa02BOM8NJTnI2YklxW/mGhEkJDrhsDfcfg= +github.com/cortexproject/cortex v1.3.1-0.20200915110508-cbaf36e4fa61 h1:zFT0SEc8peTrLX7/QquXI0P9vSkH0hlf/apDPHMlIX8= +github.com/cortexproject/cortex v1.3.1-0.20200915110508-cbaf36e4fa61/go.mod h1:dJ9gpW7dzQ7z09cKtNN9PfebumgyO4dtNdFQ6eQEed0= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= @@ -258,6 +262,8 @@ github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zA github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= @@ -313,6 +319,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/fgprof v0.9.1 h1:E6FUJ2Mlv043ipLOCFqo8+cHo9MhQ203E2cdEK/isEs= +github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+ptC4kTFPEN9Ej2latTq3bZJ5HO/OwPXYMs= @@ -326,6 +334,8 @@ github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3B github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify 
v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -428,6 +438,8 @@ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys= github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-redis/redis/v8 v8.0.0-beta.10.0.20200905143926-df7fe4e2ce72 h1:HJkWCywZsCtt//EFYNtHAOQglik0kzonhiilQCrQEgs= +github.com/go-redis/redis/v8 v8.0.0-beta.10.0.20200905143926-df7fe4e2ce72/go.mod h1:CJP1ZIHwhosNYwIdaHPZK9vHsM3+roNBaZ7U9Of1DXc= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -522,6 +534,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -538,6 +552,8 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -872,6 +888,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.50 h1:E01b5bVIssNhx2KnzAjMWEXkKrb8ytTqCDWY7lqmWjA= github.com/ncw/swift v1.0.50/go.mod 
h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= @@ -890,6 +908,9 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -898,6 +919,8 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -1101,6 +1124,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf h1:yq9nWz5Iv6ejE9d/fToxgcVDk8iuAcpvrWfsHsNySxU= github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf/go.mod h1:G8caR6G7pSDreRDvFm9wFuyjEBztmr8Ag3kBYpa/fEc= @@ -1140,8 +1165,8 @@ github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9 h1:dNVIG9aKQHR9T4uYAC4YxmkHHryOsfTwsL54WrS7u28= github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= -github.com/weaveworks/common 
v0.0.0-20200820123129-280614068c5e h1:t/as1iFw9iI6s0q9ESR2tTn2qGhI42LjBkPuQLuLzM8= -github.com/weaveworks/common v0.0.0-20200820123129-280614068c5e/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= +github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099 h1:MS5M2antM8wzMUqVxIfAi+yb6yjXvDINRFvLnmNXeIw= +github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1156,6 +1181,8 @@ github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cim github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da h1:NimzV1aGyq29m5ukMK0AMWEhFaL/lrEOaephfuoiARg= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= @@ -1183,6 +1210,8 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/otel v0.11.0 h1:IN2tzQa9Gc4ZVKnTaMbPVcHjvzOdg5n9QfnmlqiET7E= +go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1245,6 +1274,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200821190819-94841d0725da h1:vfV2BR+q1+/jmgJR30Ms3RHbryruQ3Yd83lLAAue9cs= +golang.org/x/exp v0.0.0-20200821190819-94841d0725da/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1306,6 +1337,7 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= @@ -1337,6 +1369,7 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1361,9 +1394,11 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1371,6 +1406,7 @@ golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1388,6 +1424,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1587,6 +1624,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200603094226-e3079894b1e8/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 9db9f2ee5718..4016c348bbfb 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -432,7 +432,7 @@ func (t *Loki) initMemberlistKV() (services.Service, error) { ring.GetCodec(), } - t.memberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV) + t.memberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, util.Logger) return t.memberlistKV, nil } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index 5e4e6785dfb6..6057ec1130b3 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -261,6 +261,10 @@ func (s *storeMock) DeleteSeriesIDs(ctx context.Context, from, through model.Tim panic("don't call me please") } +func (s *storeMock) GetChunkFetcher(_ model.Time) *chunk.Fetcher { + panic("don't call me please") +} + func (s *storeMock) GetSeries(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) { args := s.Called(ctx, req) res := args.Get(0) diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 6d2c67b4a777..21685acdb611 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -38,7 +38,6 @@ var ( MaxRetries: 3, CacheResults: true, ResultsCacheConfig: queryrange.ResultsCacheConfig{ - LegacyMaxCacheFreshness: 1 * time.Minute, CacheConfig: cache.Config{ EnableFifoCache: true, Fifocache: cache.FifoCacheConfig{ diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index 110c1dfc11ce..95af65e1fdee 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -189,6 +189,9 @@ func (m *mockChunkStore) Stop() {} func (m *mockChunkStore) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) 
([]chunk.Chunk, error) { return nil, nil } +func (m *mockChunkStore) GetChunkFetcher(_ model.Time) *chunk.Fetcher { + return nil +} func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*chunk.Fetcher, error) { refs := make([]chunk.Chunk, 0, len(m.chunks)) diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet index 5389bdd697bc..b7b2a0332627 100644 --- a/production/ksonnet/loki/config.libsonnet +++ b/production/ksonnet/loki/config.libsonnet @@ -150,7 +150,6 @@ cache_results: true, max_retries: 5, results_cache: { - max_freshness: '10m', cache: { memcached_client: { timeout: '500ms', @@ -180,6 +179,7 @@ ingestion_rate_strategy: 'global', ingestion_rate_mb: 10, ingestion_burst_size_mb: 20, + max_cache_freshness_per_query: '10m', }, ingester: { diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go index f94e4fec7f7f..2954220ad621 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go @@ -13,6 +13,7 @@ import ( "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" + "github.com/felixge/fgprof" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/gorilla/mux" @@ -66,6 +67,7 @@ type API struct { server *server.Server logger log.Logger sourceIPs *middleware.SourceIPExtractor + indexPage *IndexPageContent } func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logger) (*API, error) { @@ -88,6 +90,7 @@ func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logge server: s, logger: logger, sourceIPs: sourceIPs, + indexPage: newIndexPageContent(), } // If no authentication middleware is present in the config, use the default authentication middleware. @@ -143,6 +146,7 @@ func fakeRemoteAddr(handler http.Handler) http.Handler { // RegisterAlertmanager registers endpoints associated with the alertmanager. It will only // serve endpoints using the legacy http-prefix if it is not run as a single binary. func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, target, apiEnabled bool) { + a.indexPage.AddLink(SectionAdminEndpoints, "/multitenant_alertmanager/status", "Alertmanager Status") // Ensure this route is registered before the prefixed AM route a.RegisterRoute("/multitenant_alertmanager/status", am.GetStatusHandler(), false) @@ -166,14 +170,21 @@ func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, tar } // RegisterAPI registers the standard endpoints associated with a running Cortex. -func (a *API) RegisterAPI(cfg interface{}) { +func (a *API) RegisterAPI(httpPathPrefix string, cfg interface{}) { + a.indexPage.AddLink(SectionAdminEndpoints, "/config", "Current Config") + a.RegisterRoute("/config", configHandler(cfg), false) - a.RegisterRoute("/", http.HandlerFunc(indexHandler), false) + a.RegisterRoute("/", indexHandler(httpPathPrefix, a.indexPage), false) + a.RegisterRoute("/debug/fgprof", fgprof.Handler(), false) } // RegisterDistributor registers the endpoints associated with the distributor. 
func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distributor.Config) { a.RegisterRoute("/api/v1/push", push.Handler(pushConfig, a.sourceIPs, d.Push), true) + + a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/all_user_stats", "Usage Statistics") + a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/ha_tracker", "HA Tracking Status") + a.RegisterRoute("/distributor/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false) a.RegisterRoute("/distributor/ha_tracker", d.HATracker, false) @@ -187,6 +198,8 @@ func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distrib func (a *API) RegisterIngester(i *ingester.Ingester, pushConfig distributor.Config) { client.RegisterIngesterServer(a.server.GRPC, i) + a.indexPage.AddLink(SectionDangerous, "/ingester/flush", "Trigger a Flush of data from Ingester to storage") + a.indexPage.AddLink(SectionDangerous, "/ingester/shutdown", "Trigger Ingester Shutdown (Dangerous)") a.RegisterRoute("/ingester/flush", http.HandlerFunc(i.FlushHandler), false) a.RegisterRoute("/ingester/shutdown", http.HandlerFunc(i.ShutdownHandler), false) a.RegisterRoute("/ingester/push", push.Handler(pushConfig, a.sourceIPs, i.Push), true) // For testing and debugging. @@ -216,6 +229,7 @@ func (a *API) RegisterPurger(store *purger.DeleteStore, deleteRequestCancelPerio // RegisterRuler registers routes associated with the Ruler service. If the // API is not enabled only the ring route is registered. func (a *API) RegisterRuler(r *ruler.Ruler, apiEnabled bool) { + a.indexPage.AddLink(SectionAdminEndpoints, "/ruler/ring", "Ruler Ring Status") a.RegisterRoute("/ruler/ring", r, false) // Legacy Ring Route @@ -234,6 +248,7 @@ func (a *API) RegisterRuler(r *ruler.Ruler, apiEnabled bool) { a.RegisterRoute("/api/v1/rules/{namespace}/{groupName}", http.HandlerFunc(r.GetRuleGroup), true, "GET") a.RegisterRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.CreateRuleGroup), true, "POST") a.RegisterRoute("/api/v1/rules/{namespace}/{groupName}", http.HandlerFunc(r.DeleteRuleGroup), true, "DELETE") + a.RegisterRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.DeleteNamespace), true, "DELETE") // Legacy Prometheus Rule API Routes a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/rules", http.HandlerFunc(r.PrometheusRules), true, "GET") @@ -245,11 +260,13 @@ func (a *API) RegisterRuler(r *ruler.Ruler, apiEnabled bool) { a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules/{namespace}/{groupName}", http.HandlerFunc(r.GetRuleGroup), true, "GET") a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules/{namespace}", http.HandlerFunc(r.CreateRuleGroup), true, "POST") a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules/{namespace}/{groupName}", http.HandlerFunc(r.DeleteRuleGroup), true, "DELETE") + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules/{namespace}", http.HandlerFunc(r.DeleteNamespace), true, "DELETE") } } -// // RegisterRing registers the ring UI page associated with the distributor for writes. +// RegisterRing registers the ring UI page associated with the distributor for writes. 
func (a *API) RegisterRing(r *ring.Ring) { + a.indexPage.AddLink(SectionAdminEndpoints, "/ingester/ring", "Ingester Ring Status") a.RegisterRoute("/ingester/ring", r, false) // Legacy Route @@ -260,11 +277,13 @@ func (a *API) RegisterRing(r *ring.Ring) { func (a *API) RegisterStoreGateway(s *storegateway.StoreGateway) { storegatewaypb.RegisterStoreGatewayServer(a.server.GRPC, s) + a.indexPage.AddLink(SectionAdminEndpoints, "/store-gateway/ring", "Store Gateway Ring") a.RegisterRoute("/store-gateway/ring", http.HandlerFunc(s.RingHandler), false) } // RegisterCompactor registers the ring UI page associated with the compactor. func (a *API) RegisterCompactor(c *compactor.Compactor) { + a.indexPage.AddLink(SectionAdminEndpoints, "/compactor/ring", "Compactor Ring Status") a.RegisterRoute("/compactor/ring", http.HandlerFunc(c.RingHandler), false) } @@ -404,5 +423,6 @@ func (a *API) RegisterQueryFrontend(f *frontend.Frontend) { // TODO: Refactor this code to be accomplished using the services.ServiceManager // or a future module manager #2291 func (a *API) RegisterServiceMapHandler(handler http.Handler) { + a.indexPage.AddLink(SectionAdminEndpoints, "/services", "Service Status") a.RegisterRoute("/services", handler, false) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go index ffbcad8e6b47..665d220b3964 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go @@ -1,7 +1,10 @@ package api import ( + "html/template" "net/http" + "path" + "sync" "github.com/go-kit/kit/log/level" "gopkg.in/yaml.v2" @@ -9,8 +12,52 @@ import ( "github.com/cortexproject/cortex/pkg/util" ) -// TODO: Update this content to be a template that is dynamic based on how Cortex is run. -const indexPageContent = ` +const ( + SectionAdminEndpoints = "Admin Endpoints:" + SectionDangerous = "Dangerous:" +) + +func newIndexPageContent() *IndexPageContent { + return &IndexPageContent{ + content: map[string]map[string]string{}, + } +} + +// IndexPageContent is a map of sections to path -> description. +type IndexPageContent struct { + mu sync.Mutex + content map[string]map[string]string +} + +func (pc *IndexPageContent) AddLink(section, path, description string) { + pc.mu.Lock() + defer pc.mu.Unlock() + + sectionMap := pc.content[section] + if sectionMap == nil { + sectionMap = make(map[string]string) + pc.content[section] = sectionMap + } + + sectionMap[path] = description +} + +func (pc *IndexPageContent) GetContent() map[string]map[string]string { + pc.mu.Lock() + defer pc.mu.Unlock() + + result := map[string]map[string]string{} + for k, v := range pc.content { + sm := map[string]string{} + for smK, smV := range v { + sm[smK] = smV + } + result[k] = sm + } + return result +} + +var indexPageTemplate = ` @@ -19,31 +66,31 @@ const indexPageContent = `

 <!DOCTYPE html>
 <html>
 	<head><title>Cortex</title></head>
 	<body>
 		<h1>Cortex</h1>
-		<p>Admin Endpoints:</p>
-		<ul>
-			<li><a href="/distributor/all_user_stats">Usage Statistics</a></li>
-			<li><a href="/distributor/ha_tracker">HA Tracking Status</a></li>
-			<li><a href="/ingester/ring">Ingester Ring Status</a></li>
-			<li><a href="/ruler/ring">Ruler Ring Status</a></li>
-			<li><a href="/services">Service Status</a></li>
-		</ul>
-		<p>Dangerous:</p>
-		<ul>
-			<li><a href="/ingester/flush">Trigger a Flush of data from Ingester to storage</a></li>
-			<li><a href="/ingester/shutdown">Trigger Ingester Shutdown (Dangerous)</a></li>
-		</ul>
+		{{ range $s, $links := . }}
+		<p>{{ $s }}</p>
+		<ul>
+			{{ range $path, $desc := $links }}
+				<li><a href="{{ AddPathPrefix $path }}">{{ $desc }}</a></li>
+			{{ end }}
+		</ul>
+ {{ end }} ` -func indexHandler(w http.ResponseWriter, _ *http.Request) { - if _, err := w.Write([]byte(indexPageContent)); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return +func indexHandler(httpPathPrefix string, content *IndexPageContent) http.HandlerFunc { + templ := template.New("main") + templ.Funcs(map[string]interface{}{ + "AddPathPrefix": func(link string) string { + return path.Join(httpPathPrefix, link) + }, + }) + template.Must(templ.Parse(indexPageTemplate)) + + return func(w http.ResponseWriter, r *http.Request) { + err := templ.Execute(w, content.GetContent()) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go b/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go index 8682872a47e1..2dba928d0246 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/util/validation" ) func translateError(err error) error { @@ -30,7 +31,7 @@ func translateError(err error) error { case promql.ErrStorage, promql.ErrTooManySamples, promql.ErrQueryCanceled, promql.ErrQueryTimeout: // Don't translate those, just in case we use them internally. return err - case chunk.QueryError: + case chunk.QueryError, validation.LimitError: // This will be returned with status code 422 by Prometheus API. return err default: diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go index dbbc6b2e8c4f..b6144dbf8fea 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go @@ -99,7 +99,7 @@ func New(cfg Config, reg prometheus.Registerer, logger log.Logger) (Cache, error cfg.Redis.Expiration = cfg.DefaultValidity } cacheName := cfg.Prefix + "redis" - cache := NewRedisCache(cfg.Redis, cacheName, nil, logger) + cache := NewRedisCache(cacheName, NewRedisClient(&cfg.Redis), logger) caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache, reg), reg)) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go index 382290e30ba8..5887bd84eedf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go @@ -2,102 +2,37 @@ package cache import ( "context" - "flag" - "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/gomodule/redigo/redis" "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/flagext" ) // RedisCache type caches chunks in redis type RedisCache struct { - name string - expiration int - timeout time.Duration - pool *redis.Pool - logger log.Logger -} - -// RedisConfig defines how a RedisCache should be constructed. 
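The index-page machinery added in handlers.go above replaces the hard-coded HTML with content assembled at registration time: each Register* method calls AddLink, and indexHandler renders whatever has been collected, prefixing every link with the server's HTTP path prefix. A minimal self-contained sketch of the same pattern; the static section map, the "/cortex" prefix, and the trimmed template are illustrative, not the vendored code:

```go
package main

import (
	"html/template"
	"net/http"
	"path"
)

// Sections map to path -> description, mirroring IndexPageContent above.
var content = map[string]map[string]string{
	"Admin Endpoints:": {"/ingester/ring": "Ingester Ring Status"},
	"Dangerous:":       {"/ingester/flush": "Trigger a Flush"},
}

// AddPathPrefix must be registered before Parse, as in indexHandler above.
var tmpl = template.Must(template.New("main").Funcs(template.FuncMap{
	"AddPathPrefix": func(link string) string { return path.Join("/cortex", link) },
}).Parse(`{{ range $s, $links := . }}<p>{{ $s }}</p><ul>{{ range $p, $d := $links }}<li><a href="{{ AddPathPrefix $p }}">{{ $d }}</a></li>{{ end }}</ul>{{ end }}`))

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if err := tmpl.Execute(w, content); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
	_ = http.ListenAndServe(":8080", nil)
}
```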
-type RedisConfig struct { - Endpoint string `yaml:"endpoint"` - Timeout time.Duration `yaml:"timeout"` - Expiration time.Duration `yaml:"expiration"` - MaxIdleConns int `yaml:"max_idle_conns"` - MaxActiveConns int `yaml:"max_active_conns"` - Password flagext.Secret `yaml:"password"` - EnableTLS bool `yaml:"enable_tls"` - IdleTimeout time.Duration `yaml:"idle_timeout"` - WaitOnPoolExhaustion bool `yaml:"wait_on_pool_exhaustion"` - MaxConnLifetime time.Duration `yaml:"max_conn_lifetime"` -} - -// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet -func (cfg *RedisConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) { - f.StringVar(&cfg.Endpoint, prefix+"redis.endpoint", "", description+"Redis service endpoint to use when caching chunks. If empty, no redis will be used.") - f.DurationVar(&cfg.Timeout, prefix+"redis.timeout", 100*time.Millisecond, description+"Maximum time to wait before giving up on redis requests.") - f.DurationVar(&cfg.Expiration, prefix+"redis.expiration", 0, description+"How long keys stay in the redis.") - f.IntVar(&cfg.MaxIdleConns, prefix+"redis.max-idle-conns", 80, description+"Maximum number of idle connections in pool.") - f.IntVar(&cfg.MaxActiveConns, prefix+"redis.max-active-conns", 0, description+"Maximum number of active connections in pool.") - f.Var(&cfg.Password, prefix+"redis.password", description+"Password to use when connecting to redis.") - f.BoolVar(&cfg.EnableTLS, prefix+"redis.enable-tls", false, description+"Enables connecting to redis with TLS.") - f.DurationVar(&cfg.IdleTimeout, prefix+"redis.idle-timeout", 0, description+"Close connections after remaining idle for this duration. If the value is zero, then idle connections are not closed.") - f.BoolVar(&cfg.WaitOnPoolExhaustion, prefix+"redis.wait-on-pool-exhaustion", false, description+"Enables waiting if there are no idle connections. If the value is false and the pool is at the max_active_conns limit, the pool will return a connection with ErrPoolExhausted error and not wait for idle connections.") - f.DurationVar(&cfg.MaxConnLifetime, prefix+"redis.max-conn-lifetime", 0, description+"Close connections older than this duration. If the value is zero, then the pool does not close connections based on age.") + name string + redis *RedisClient + logger log.Logger } // NewRedisCache creates a new RedisCache -func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool, logger log.Logger) *RedisCache { +func NewRedisCache(name string, redisClient *RedisClient, logger log.Logger) *RedisCache { util.WarnExperimentalUse("Redis cache") - // pool != nil only in unit tests - if pool == nil { - pool = &redis.Pool{ - Dial: func() (redis.Conn, error) { - options := make([]redis.DialOption, 0, 2) - if cfg.EnableTLS { - options = append(options, redis.DialUseTLS(true)) - } - if cfg.Password.Value != "" { - options = append(options, redis.DialPassword(cfg.Password.Value)) - } - - c, err := redis.Dial("tcp", cfg.Endpoint, options...) 
- if err != nil { - return nil, err - } - return c, err - }, - MaxIdle: cfg.MaxIdleConns, - MaxActive: cfg.MaxActiveConns, - IdleTimeout: cfg.IdleTimeout, - Wait: cfg.WaitOnPoolExhaustion, - MaxConnLifetime: cfg.MaxConnLifetime, - } - } - cache := &RedisCache{ - expiration: int(cfg.Expiration.Seconds()), - timeout: cfg.Timeout, - name: name, - pool: pool, - logger: logger, + name: name, + redis: redisClient, + logger: logger, } - - if err := cache.ping(context.Background()); err != nil { - level.Error(logger).Log("msg", "error connecting to redis", "endpoint", cfg.Endpoint, "err", err) + if err := cache.redis.Ping(context.Background()); err != nil { + level.Error(logger).Log("msg", "error connecting to redis", "name", name, "err", err) } - return cache } // Fetch gets keys from the cache. The keys that are found must be in the order of the keys requested. func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) { - data, err := c.mget(ctx, keys) - + data, err := c.redis.MGet(ctx, keys) if err != nil { level.Error(c.logger).Log("msg", "failed to get from redis", "name", c.name, "err", err) missed = make([]string, len(keys)) @@ -117,7 +52,7 @@ func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string, // Store stores the key in the cache. func (c *RedisCache) Store(ctx context.Context, keys []string, bufs [][]byte) { - err := c.mset(ctx, keys, bufs, c.expiration) + err := c.redis.MSet(ctx, keys, bufs) if err != nil { level.Error(c.logger).Log("msg", "failed to put to redis", "name", c.name, "err", err) } @@ -125,46 +60,5 @@ func (c *RedisCache) Store(ctx context.Context, keys []string, bufs [][]byte) { // Stop stops the redis client. func (c *RedisCache) Stop() { - _ = c.pool.Close() -} - -// mset adds key-value pairs to the cache. -func (c *RedisCache) mset(_ context.Context, keys []string, bufs [][]byte, ttl int) error { - conn := c.pool.Get() - defer conn.Close() - - if err := conn.Send("MULTI"); err != nil { - return err - } - for i := range keys { - if err := conn.Send("SETEX", keys[i], ttl, bufs[i]); err != nil { - return err - } - } - _, err := redis.DoWithTimeout(conn, c.timeout, "EXEC") - return err -} - -// mget retrieves values from the cache. -func (c *RedisCache) mget(_ context.Context, keys []string) ([][]byte, error) { - intf := make([]interface{}, len(keys)) - for i, key := range keys { - intf[i] = key - } - - conn := c.pool.Get() - defer conn.Close() - - return redis.ByteSlices(redis.DoWithTimeout(conn, c.timeout, "MGET", intf...)) -} - -func (c *RedisCache) ping(_ context.Context) error { - conn := c.pool.Get() - defer conn.Close() - - pong, err := redis.DoWithTimeout(conn, c.timeout, "PING") - if err == nil { - _, err = redis.String(pong, err) - } - return err + _ = c.redis.Close() } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_client.go new file mode 100644 index 000000000000..df4ad5aadb3c --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_client.go @@ -0,0 +1,137 @@ +package cache + +import ( + "context" + "crypto/tls" + "flag" + "fmt" + "strings" + "time" + "unsafe" + + "github.com/cortexproject/cortex/pkg/util/flagext" + + "github.com/go-redis/redis/v8" +) + +// RedisConfig defines how a RedisCache should be constructed. 
+type RedisConfig struct { + Endpoint string `yaml:"endpoint"` + MasterName string `yaml:"master_name"` + Timeout time.Duration `yaml:"timeout"` + Expiration time.Duration `yaml:"expiration"` + DB int `yaml:"db"` + PoolSize int `yaml:"pool_size"` + Password flagext.Secret `yaml:"password"` + EnableTLS bool `yaml:"enable_tls"` + IdleTimeout time.Duration `yaml:"idle_timeout"` + MaxConnAge time.Duration `yaml:"max_connection_age"` +} + +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet +func (cfg *RedisConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) { + f.StringVar(&cfg.Endpoint, prefix+"redis.endpoint", "", description+"Redis Server endpoint to use for caching. A comma-separated list of endpoints for Redis Cluster or Redis Sentinel. If empty, no redis will be used.") + f.StringVar(&cfg.MasterName, prefix+"redis.master-name", "", description+"Redis Sentinel master name. An empty string for Redis Server or Redis Cluster.") + f.DurationVar(&cfg.Timeout, prefix+"redis.timeout", 100*time.Millisecond, description+"Maximum time to wait before giving up on redis requests.") + f.DurationVar(&cfg.Expiration, prefix+"redis.expiration", 0, description+"How long keys stay in the redis.") + f.IntVar(&cfg.DB, prefix+"redis.db", 0, description+"Database index.") + f.IntVar(&cfg.PoolSize, prefix+"redis.pool-size", 0, description+"Maximum number of connections in the pool.") + f.Var(&cfg.Password, prefix+"redis.password", description+"Password to use when connecting to redis.") + f.BoolVar(&cfg.EnableTLS, prefix+"redis.enable-tls", false, description+"Enables connecting to redis with TLS.") + f.DurationVar(&cfg.IdleTimeout, prefix+"redis.idle-timeout", 0, description+"Close connections after remaining idle for this duration. If the value is zero, then idle connections are not closed.") + f.DurationVar(&cfg.MaxConnAge, prefix+"redis.max-connection-age", 0, description+"Close connections older than this duration. 
If the value is zero, then the pool does not close connections based on age.") +} + +type RedisClient struct { + expiration time.Duration + timeout time.Duration + rdb redis.UniversalClient +} + +// NewRedisClient creates Redis client +func NewRedisClient(cfg *RedisConfig) *RedisClient { + opt := &redis.UniversalOptions{ + Addrs: strings.Split(cfg.Endpoint, ","), + MasterName: cfg.MasterName, + Password: cfg.Password.Value, + DB: cfg.DB, + PoolSize: cfg.PoolSize, + IdleTimeout: cfg.IdleTimeout, + MaxConnAge: cfg.MaxConnAge, + } + if cfg.EnableTLS { + opt.TLSConfig = &tls.Config{} + } + return &RedisClient{ + expiration: cfg.Expiration, + timeout: cfg.Timeout, + rdb: redis.NewUniversalClient(opt), + } +} + +func (c *RedisClient) Ping(ctx context.Context) error { + var cancel context.CancelFunc + if c.timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.timeout) + defer cancel() + } + + pong, err := c.rdb.Ping(ctx).Result() + if err != nil { + return err + } + if pong != "PONG" { + return fmt.Errorf("redis: Unexpected PING response %q", pong) + } + return nil +} + +func (c *RedisClient) MSet(ctx context.Context, keys []string, values [][]byte) error { + var cancel context.CancelFunc + if c.timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.timeout) + defer cancel() + } + + pipe := c.rdb.TxPipeline() + for i := range keys { + pipe.Set(ctx, keys[i], values[i], c.expiration) + } + _, err := pipe.Exec(ctx) + return err +} + +func (c *RedisClient) MGet(ctx context.Context, keys []string) ([][]byte, error) { + var cancel context.CancelFunc + if c.timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.timeout) + defer cancel() + } + + cmd := c.rdb.MGet(ctx, keys...) + if err := cmd.Err(); err != nil { + return nil, err + } + + ret := make([][]byte, len(keys)) + for i, val := range cmd.Val() { + if val != nil { + ret[i] = StringToBytes(val.(string)) + } + } + return ret, nil +} + +func (c *RedisClient) Close() error { + return c.rdb.Close() +} + +// StringToBytes converts string to byte slice. (copied from vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go) +func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go index 0c9c24879079..a509c5bfb6e4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go @@ -90,15 +90,9 @@ func (cfg *Config) Validate() error { } func (cfg *Config) session(name string, reg prometheus.Registerer) (*gocql.Session, error) { - consistency, err := gocql.ParseConsistencyWrapper(cfg.Consistency) - if err != nil { - return nil, errors.WithStack(err) - } - cluster := gocql.NewCluster(strings.Split(cfg.Addresses, ",")...) 
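The RedisClient above selects standalone, Sentinel (via MasterName), or Cluster mode through go-redis's UniversalClient, and applies the configured timeout to each call. A hedged usage sketch, assuming a local Redis on localhost:6379:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/chunk/cache"
)

func main() {
	// A single endpoint with an empty MasterName means standalone mode.
	client := cache.NewRedisClient(&cache.RedisConfig{
		Endpoint:   "localhost:6379", // assumed local Redis
		Timeout:    100 * time.Millisecond,
		Expiration: time.Hour,
	})
	defer client.Close()

	ctx := context.Background()
	if err := client.Ping(ctx); err != nil {
		fmt.Println("redis unreachable:", err)
		return
	}

	// MSet pipelines all SETs in one transaction; MGet returns values in key
	// order, leaving nil entries for missing keys.
	_ = client.MSet(ctx, []string{"k1", "k2"}, [][]byte{[]byte("v1"), []byte("v2")})
	vals, _ := client.MGet(ctx, []string{"k1", "missing", "k2"})
	fmt.Printf("%q\n", vals) // ["v1" "" "v2"]
}
```

That key-order guarantee of MGet is what lets RedisCache.Fetch above line up its found and missed slices against the requested keys.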
cluster.Port = cfg.Port cluster.Keyspace = cfg.Keyspace - cluster.Consistency = consistency cluster.BatchObserver = observer{} cluster.QueryObserver = observer{} cluster.Timeout = cfg.Timeout @@ -118,7 +112,7 @@ func (cfg *Config) session(name string, reg prometheus.Registerer) (*gocql.Sessi if !cfg.ConvictHosts { cluster.ConvictionPolicy = noopConvictionPolicy{} } - if err = cfg.setClusterConfig(cluster); err != nil { + if err := cfg.setClusterConfig(cluster); err != nil { return nil, errors.WithStack(err) } @@ -141,6 +135,12 @@ func (cfg *Config) session(name string, reg prometheus.Registerer) (*gocql.Sessi // apply config settings to a cassandra ClusterConfig func (cfg *Config) setClusterConfig(cluster *gocql.ClusterConfig) error { + consistency, err := gocql.ParseConsistencyWrapper(cfg.Consistency) + if err != nil { + return errors.Wrap(err, "unable to parse the configured consistency") + } + + cluster.Consistency = consistency cluster.DisableInitialHostLookup = cfg.DisableInitialHostLookup if cfg.SSL { @@ -223,8 +223,6 @@ type StorageClient struct { // NewStorageClient returns a new StorageClient. func NewStorageClient(cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (*StorageClient, error) { - pkgutil.WarnExperimentalUse("Cassandra Backend") - readSession, err := cfg.session("index-read", registerer) if err != nil { return nil, errors.WithStack(err) @@ -408,8 +406,6 @@ type ObjectClient struct { // NewObjectClient returns a new ObjectClient. func NewObjectClient(cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (*ObjectClient, error) { - pkgutil.WarnExperimentalUse("Cassandra Backend") - readSession, err := cfg.session("chunks-read", registerer) if err != nil { return nil, errors.WithStack(err) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 6fa8ea9de5e6..8746a21dcfa8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -714,3 +714,7 @@ func (c *store) DeleteSeriesIDs(ctx context.Context, from, through model.Time, u // SeriesID is something which is only used in SeriesStore so we need not do anything here return nil } + +func (c *baseStore) GetChunkFetcher(_ model.Time) *Fetcher { + return c.fetcher +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index 46e055fcb130..a3c5a22b20ef 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -32,6 +32,7 @@ type Store interface { GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) + GetChunkFetcher(tm model.Time) *Fetcher // DeleteChunk deletes a chunks index entry and then deletes the actual chunk from chunk storage. 
// It takes care of chunks which are partially deleted by first creating and inserting a new chunk and then deleting the original chunk @@ -174,6 +175,22 @@ func (c compositeStore) GetChunkRefs(ctx context.Context, userID string, from, t return chunkIDs, fetchers, err } +func (c compositeStore) GetChunkFetcher(tm model.Time) *Fetcher { + // find the schema with the lowest start _after_ tm + j := sort.Search(len(c.stores), func(j int) bool { + return c.stores[j].start > tm + }) + + // reduce it by 1 because we want a schema with start <= tm + j-- + + if 0 <= j && j < len(c.stores) { + return c.stores[j].GetChunkFetcher(tm) + } + + return nil +} + // DeleteSeriesIDs deletes series IDs from index in series store func (c CompositeStore) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error { return c.forStores(ctx, userID, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error { @@ -233,12 +250,6 @@ func (c compositeStore) forStores(ctx context.Context, userID string, from, thro nextSchemaStarts = c.stores[i+1].start } - // If the next schema starts at the same time as this one, - // skip this one. - if nextSchemaStarts == c.stores[i].start { - continue - } - end := min(through, nextSchemaStarts-1) err := callback(ctx, start, end, c.stores[i].Store) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go index 7ea6bd0784e8..6bcd8d23085b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go @@ -116,7 +116,7 @@ func newStorageClientColumnKey(cfg Config, schemaCfg chunk.SchemaConfig, client // We hash the row key and prepend it back to the key for better distribution. // We preserve the existing key to make migrations and o11y easier. if cfg.DistributeKeys { - hashValue = hashPrefix(hashValue) + "-" + hashValue + hashValue = HashPrefix(hashValue) + "-" + hashValue } return hashValue, string(rangeValue) @@ -124,9 +124,9 @@ } } -// hashPrefix calculates a 64bit hash of the input string and hex-encodes +// HashPrefix calculates a 64bit hash of the input string and hex-encodes // the result, taking care to zero pad etc. -func hashPrefix(input string) string { +func HashPrefix(input string) string { prefix := hashAdd(hashNew(), input) var encodedUint64 [8]byte binary.LittleEndian.PutUint64(encodedUint64[:], prefix) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go index fb420b021596..8ec2397e8bc5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go @@ -3,6 +3,7 @@ package chunk import ( "bytes" "context" + "errors" "fmt" "io" "io/ioutil" @@ -14,6 +15,16 @@ import ( "github.com/cortexproject/cortex/pkg/util" ) +type MockStorageMode int + +var errPermissionDenied = errors.New("permission denied") + +const ( + MockStorageModeReadWrite = 0 + MockStorageModeReadOnly = 1 + MockStorageModeWriteOnly = 2 +) + // MockStorage is a fake in-memory StorageClient.
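GetChunkFetcher above picks the schema period in effect at time tm with the usual sort.Search idiom: find the first period starting strictly after tm, then step back one. A small standalone illustration of that arithmetic, with plain ints standing in for model.Time:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Schema period start times, sorted ascending.
	starts := []int{0, 1000, 5000}

	for _, tm := range []int{-1, 0, 999, 1000, 7000} {
		// First index whose start is strictly after tm...
		j := sort.Search(len(starts), func(j int) bool { return starts[j] > tm })
		j-- // ...then step back to the period with start <= tm.
		if 0 <= j && j < len(starts) {
			fmt.Printf("tm=%d -> period starting at %d\n", tm, starts[j])
		} else {
			fmt.Printf("tm=%d -> before the first period (nil fetcher)\n", tm)
		}
	}
}
```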
type MockStorage struct { mtx sync.RWMutex @@ -22,6 +33,7 @@ type MockStorage struct { numIndexWrites int numChunkWrites int + mode MockStorageMode } type mockTable struct { @@ -46,6 +58,10 @@ func NewMockStorage() *MockStorage { func (*MockStorage) Stop() { } +func (m *MockStorage) SetMode(mode MockStorageMode) { + m.mode = mode +} + // ListTables implements StorageClient. func (m *MockStorage) ListTables(_ context.Context) ([]string, error) { m.mtx.RLock() @@ -135,6 +151,10 @@ func (m *MockStorage) BatchWrite(ctx context.Context, batch WriteBatch) error { m.mtx.Lock() defer m.mtx.Unlock() + if m.mode == MockStorageModeReadOnly { + return errPermissionDenied + } + mockBatch := *batch.(*mockWriteBatch) seenWrites := map[string]bool{} @@ -209,6 +229,10 @@ func (m *MockStorage) QueryPages(ctx context.Context, queries []IndexQuery, call m.mtx.RLock() defer m.mtx.RUnlock() + if m.mode == MockStorageModeWriteOnly { + return errPermissionDenied + } + for _, query := range queries { err := m.query(ctx, query, func(b ReadBatch) bool { return callback(query, b) @@ -302,6 +326,10 @@ func (m *MockStorage) PutChunks(_ context.Context, chunks []Chunk) error { m.mtx.Lock() defer m.mtx.Unlock() + if m.mode == MockStorageModeReadOnly { + return errPermissionDenied + } + m.numChunkWrites += len(chunks) for i := range chunks { @@ -319,6 +347,10 @@ func (m *MockStorage) GetChunks(ctx context.Context, chunkSet []Chunk) ([]Chunk, m.mtx.RLock() defer m.mtx.RUnlock() + if m.mode == MockStorageModeWriteOnly { + return nil, errPermissionDenied + } + decodeContext := NewDecodeContext() result := []Chunk{} for _, chunk := range chunkSet { @@ -337,6 +369,10 @@ func (m *MockStorage) GetChunks(ctx context.Context, chunkSet []Chunk) ([]Chunk, // DeleteChunk implements StorageClient. 
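The new mode field makes MockStorage usable for permission-failure tests: read-only mode rejects writes and write-only mode rejects reads, each returning errPermissionDenied. A sketch of a test using the exported API above (the test name and package are illustrative):

```go
package chunk_test

import (
	"context"
	"testing"

	"github.com/cortexproject/cortex/pkg/chunk"
)

func TestReadOnlyModeRejectsWrites(t *testing.T) {
	m := chunk.NewMockStorage()
	m.SetMode(chunk.MockStorageModeReadOnly)

	// Writes must fail with "permission denied" while reads keep working.
	if err := m.PutChunks(context.Background(), nil); err == nil {
		t.Fatal("expected permission denied in read-only mode")
	}
	if _, err := m.GetChunks(context.Background(), nil); err != nil {
		t.Fatalf("reads should still succeed: %v", err)
	}
}
```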
func (m *MockStorage) DeleteChunk(ctx context.Context, userID, chunkID string) error { + if m.mode == MockStorageModeReadOnly { + return errPermissionDenied + } + return m.DeleteObject(ctx, chunkID) } @@ -344,6 +380,10 @@ func (m *MockStorage) GetObject(ctx context.Context, objectKey string) (io.ReadC m.mtx.RLock() defer m.mtx.RUnlock() + if m.mode == MockStorageModeWriteOnly { + return nil, errPermissionDenied + } + buf, ok := m.objects[objectKey] if !ok { return nil, ErrStorageObjectNotFound @@ -358,6 +398,10 @@ func (m *MockStorage) PutObject(ctx context.Context, objectKey string, object io return err } + if m.mode == MockStorageModeReadOnly { + return errPermissionDenied + } + m.mtx.Lock() defer m.mtx.Unlock() @@ -369,6 +413,10 @@ func (m *MockStorage) DeleteObject(ctx context.Context, objectKey string) error m.mtx.Lock() defer m.mtx.Unlock() + if m.mode == MockStorageModeReadOnly { + return errPermissionDenied + } + if _, ok := m.objects[objectKey]; !ok { return ErrStorageObjectNotFound } @@ -381,6 +429,10 @@ func (m *MockStorage) List(ctx context.Context, prefix string) ([]StorageObject, m.mtx.RLock() defer m.mtx.RUnlock() + if m.mode == MockStorageModeWriteOnly { + return nil, nil, errPermissionDenied + } + storageObjects := make([]StorageObject, 0, len(m.objects)) for key := range m.objects { // ToDo: Store mtime when we have mtime based use-cases for storage objects diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/objectclient/client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/objectclient/client.go index a3d3e24d262f..9f7b4a1152be 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/objectclient/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/objectclient/client.go @@ -111,5 +111,9 @@ func (o *Client) getChunk(ctx context.Context, decodeContext *chunk.DecodeContex // GetChunks retrieves the specified chunks from the configured backend func (o *Client) DeleteChunk(ctx context.Context, userID, chunkID string) error { - return o.store.DeleteObject(ctx, chunkID) + key := chunkID + if o.keyEncoder != nil { + key = o.keyEncoder(key) + } + return o.store.DeleteObject(ctx, key) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go index 5d2df4a4f60d..8f419e1f59f8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go @@ -12,6 +12,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" @@ -26,9 +27,11 @@ import ( ) const ( - millisecondPerDay = int64(24 * time.Hour / time.Millisecond) - statusSuccess = "success" - statusFail = "fail" + millisecondPerDay = int64(24 * time.Hour / time.Millisecond) + statusSuccess = "success" + statusFail = "fail" + loadRequestsInterval = time.Hour + retryFailedRequestsInterval = 15 * time.Minute ) type purgerMetrics struct { @@ -186,15 +189,20 @@ func (p *Purger) loop(ctx context.Context) error { // load requests on startup instead of waiting for first ticker loadRequests() - loadRequestsTicker := time.NewTicker(time.Hour) + loadRequestsTicker := time.NewTicker(loadRequestsInterval) defer loadRequestsTicker.Stop() + retryFailedRequestsTicker := time.NewTicker(retryFailedRequestsInterval) + defer 
retryFailedRequestsTicker.Stop() + for { select { case <-loadRequestsTicker.C: loadRequests() case <-p.pullNewRequestsChan: loadRequests() + case <-retryFailedRequestsTicker.C: + p.retryFailedRequests() case <-ctx.Done(): return nil } @@ -207,6 +215,25 @@ func (p *Purger) stop(_ error) error { return nil } +func (p *Purger) retryFailedRequests() { + userIDsWithFailedRequest := p.inProcessRequests.listUsersWithFailedRequest() + + for _, userID := range userIDsWithFailedRequest { + deleteRequest := p.inProcessRequests.get(userID) + if deleteRequest == nil { + level.Error(util.Logger).Log("msg", "expected an in-process delete request", "user", userID) + continue + } + + p.inProcessRequests.unsetFailedRequestForUser(userID) + err := p.resumeStalledRequest(*deleteRequest) + if err != nil { + reqWithLogger := makeDeleteRequestWithLogger(*deleteRequest, util.Logger) + level.Error(reqWithLogger.logger).Log("msg", "failed to resume failed request", "err", err) + } + } +} + func (p *Purger) workerJobCleanup(job workerJob) { err := p.removeDeletePlan(context.Background(), job.userID, job.deleteRequestID, job.planNo) if err != nil { @@ -296,9 +323,15 @@ func (p *Purger) worker() { } } -func (p *Purger) executePlan(userID, requestID string, planNo int, logger log.Logger) error { +func (p *Purger) executePlan(userID, requestID string, planNo int, logger log.Logger) (err error) { logger = log.With(logger, "plan_no", planNo) + defer func() { + if err != nil { + p.inProcessRequests.setFailedRequestForUser(userID) + } + }() + plan, err := p.getDeletePlan(context.Background(), userID, requestID, planNo) if err != nil { if err == chunk.ErrStorageObjectNotFound { @@ -354,45 +387,54 @@ func (p *Purger) executePlan(userID, requestID string, planNo int, logger log.Lo level.Info(logger).Log("msg", "finished execution of plan") - return nil + return } // we need to load all in process delete requests on startup to finish them first func (p *Purger) loadInprocessDeleteRequests() error { - requestsWithBuildingPlanStatus, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusBuildingPlan) + inprocessRequests, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusBuildingPlan) if err != nil { return err } - for i := range requestsWithBuildingPlanStatus { - deleteRequest := requestsWithBuildingPlanStatus[i] - req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) + requestsWithDeletingStatus, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusDeleting) + if err != nil { + return err + } + + inprocessRequests = append(inprocessRequests, requestsWithDeletingStatus...) 
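The loop above gains a second, faster ticker so the purger retries failed delete requests every 15 minutes instead of waiting for the hourly reload. The select-over-two-tickers shape it relies on, reduced to a runnable sketch with made-up intervals and stubbed actions:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// run mirrors the purger loop: one ticker reloads pending requests,
// a second, faster ticker retries requests that previously failed.
func run(ctx context.Context, loadEvery, retryEvery time.Duration) {
	loadTicker := time.NewTicker(loadEvery)
	defer loadTicker.Stop()

	retryTicker := time.NewTicker(retryEvery)
	defer retryTicker.Stop()

	for {
		select {
		case <-loadTicker.C:
			fmt.Println("loading new delete requests")
		case <-retryTicker.C:
			fmt.Println("retrying failed delete requests")
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	run(ctx, 300*time.Millisecond, 150*time.Millisecond)
}
```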
+ + for i := range inprocessRequests { + deleteRequest := inprocessRequests[i] p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) + req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) - level.Info(req.logger).Log("msg", "loaded in process delete requests with status building plan") + level.Info(req.logger).Log("msg", "resuming in process delete requests", "status", deleteRequest.Status) + err = p.resumeStalledRequest(deleteRequest) + if err != nil { + level.Error(req.logger).Log("msg", "failed to resume stalled request", "err", err) + } + + } + return nil +} + +func (p *Purger) resumeStalledRequest(deleteRequest DeleteRequest) error { + req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) + + if deleteRequest.Status == StatusBuildingPlan { err := p.buildDeletePlan(req) if err != nil { p.metrics.deleteRequestsProcessingFailures.WithLabelValues(deleteRequest.UserID).Inc() - level.Error(req.logger).Log("msg", "error building delete plan", "err", err) - continue + return errors.Wrap(err, "failed to build delete plan") } - level.Info(req.logger).Log("msg", "sending delete request for execution") - p.executePlansChan <- req + deleteRequest.Status = StatusDeleting } - requestsWithDeletingStatus, err := p.deleteStore.GetDeleteRequestsByStatus(context.Background(), StatusDeleting) - if err != nil { - return err - } - - for i := range requestsWithDeletingStatus { - deleteRequest := requestsWithDeletingStatus[i] - req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) - level.Info(req.logger).Log("msg", "loaded in process delete requests with status deleting") - - p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) + if deleteRequest.Status == StatusDeleting { + level.Info(req.logger).Log("msg", "sending delete request for execution") p.executePlansChan <- req } @@ -448,6 +490,7 @@ func (p *Purger) pullDeleteRequestsToPlanDeletes() error { return err } + deleteRequest.Status = StatusBuildingPlan p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) @@ -483,10 +526,19 @@ func (p *Purger) pullDeleteRequestsToPlanDeletes() error { // A days plan will include chunk ids and labels of all the chunks which are supposed to be deleted. // Chunks are grouped together by labels to avoid storing labels repetitively. 
// After building delete plans it updates status of delete request to StatusDeleting and sends it for execution -func (p *Purger) buildDeletePlan(req deleteRequestWithLogger) error { +func (p *Purger) buildDeletePlan(req deleteRequestWithLogger) (err error) { ctx := context.Background() ctx = user.InjectOrgID(ctx, req.UserID) + defer func() { + if err != nil { + p.inProcessRequests.setFailedRequestForUser(req.UserID) + } else { + req.Status = StatusDeleting + p.inProcessRequests.set(req.UserID, &req.DeleteRequest) + } + }() + perDayTimeRange := splitByDay(req.StartTime, req.EndTime) level.Info(req.logger).Log("msg", "building delete plan", "num_plans", len(perDayTimeRange)) @@ -531,21 +583,21 @@ func (p *Purger) buildDeletePlan(req deleteRequestWithLogger) error { plans[i] = pb } - err := p.putDeletePlans(ctx, req.UserID, req.RequestID, plans) + err = p.putDeletePlans(ctx, req.UserID, req.RequestID, plans) if err != nil { - return err + return } err = p.deleteStore.UpdateStatus(ctx, req.UserID, req.RequestID, StatusDeleting) if err != nil { - return err + return } p.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(req.UserID).Add(float64(len(includedChunkIDs))) level.Info(req.logger).Log("msg", "built delete plans", "num_plans", len(perDayTimeRange)) - return nil + return } func (p *Purger) putDeletePlans(ctx context.Context, userID, requestID string, plans [][]byte) error { @@ -695,12 +747,16 @@ func makeDeleteRequestWithLogger(deleteRequest DeleteRequest, l log.Logger) dele // inProcessRequestsCollection stores DeleteRequests which are in process by each user. // Currently we only allow processing of one delete request per user so it stores single DeleteRequest per user. type inProcessRequestsCollection struct { - requests map[string]*DeleteRequest - mtx sync.RWMutex + requests map[string]*DeleteRequest + usersWithFailedRequests map[string]struct{} + mtx sync.RWMutex } func newInProcessRequestsCollection() *inProcessRequestsCollection { - return &inProcessRequestsCollection{requests: map[string]*DeleteRequest{}} + return &inProcessRequestsCollection{ + requests: map[string]*DeleteRequest{}, + usersWithFailedRequests: map[string]struct{}{}, + } } func (i *inProcessRequestsCollection) set(userID string, request *DeleteRequest) { @@ -744,3 +800,29 @@ func (i *inProcessRequestsCollection) getOldest() *DeleteRequest { return oldestRequest } + +func (i *inProcessRequestsCollection) setFailedRequestForUser(userID string) { + i.mtx.Lock() + defer i.mtx.Unlock() + + i.usersWithFailedRequests[userID] = struct{}{} +} + +func (i *inProcessRequestsCollection) unsetFailedRequestForUser(userID string) { + i.mtx.Lock() + defer i.mtx.Unlock() + + delete(i.usersWithFailedRequests, userID) +} + +func (i *inProcessRequestsCollection) listUsersWithFailedRequest() []string { + i.mtx.RLock() + defer i.mtx.RUnlock() + + userIDs := make([]string, 0, len(i.usersWithFailedRequests)) + for userID := range i.usersWithFailedRequests { + userIDs = append(userIDs, userID) + } + + return userIDs +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index 405dac5359e1..b5d5b20c4589 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -25,10 +25,11 @@ const ( ) var ( - errInvalidSchemaVersion = errors.New("invalid schema version") - errInvalidTablePeriod = errors.New("the table period must be a multiple of 24h (1h for 
schema v1)") - errConfigFileNotSet = errors.New("schema config file needs to be set") - errConfigChunkPrefixNotSet = errors.New("schema config for chunks is missing the 'prefix' setting") + errInvalidSchemaVersion = errors.New("invalid schema version") + errInvalidTablePeriod = errors.New("the table period must be a multiple of 24h (1h for schema v1)") + errConfigFileNotSet = errors.New("schema config file needs to be set") + errConfigChunkPrefixNotSet = errors.New("schema config for chunks is missing the 'prefix' setting") + errSchemaIncreasingFromTime = errors.New("from time in schemas must be distinct and in increasing order") ) // PeriodConfig defines the schema and tables to use for a period of time @@ -120,6 +121,12 @@ func (cfg *SchemaConfig) Validate() error { if err := periodCfg.validate(); err != nil { return err } + + if i+1 < len(cfg.Configs) { + if cfg.Configs[i].From.Time.Unix() >= cfg.Configs[i+1].From.Time.Unix() { + return errSchemaIncreasingFromTime + } + } } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 32d51ad1fc47..c0a8fb438485 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -92,7 +92,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.Swift.RegisterFlags(f) cfg.GrpcConfig.RegisterFlags(f) - f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks or blocks. Be aware that blocks storage is experimental and shouldn't be used in production.") + f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks or blocks.") cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "Cache config for index entry reading. ", f) f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. 
Should be no higher than -ingester.max-chunk-idle.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go index 02f3db1e5c74..31583b9faa89 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go @@ -108,14 +108,13 @@ func dummyChunkFor(from, through model.Time, metric labels.Labels) chunk.Chunk { return chunk } -func SetupTestChunkStore() (chunk.Store, error) { +func SetupTestChunkStoreWithClients(indexClient chunk.IndexClient, chunksClient chunk.Client, tableClient chunk.TableClient) (chunk.Store, error) { var ( tbmConfig chunk.TableManagerConfig schemaCfg = chunk.DefaultSchemaConfig("", "v10", 0) ) flagext.DefaultValues(&tbmConfig) - storage := chunk.NewMockStorage() - tableManager, err := chunk.NewTableManager(tbmConfig, schemaCfg, 12*time.Hour, storage, nil, nil, nil) + tableManager, err := chunk.NewTableManager(tbmConfig, schemaCfg, 12*time.Hour, tableClient, nil, nil, nil) if err != nil { return nil, err } @@ -137,7 +136,7 @@ func SetupTestChunkStore() (chunk.Store, error) { flagext.DefaultValues(&storeCfg) store := chunk.NewCompositeStore(nil) - err = store.AddPeriod(storeCfg, schemaCfg.Configs[0], storage, storage, overrides, cache.NewNoopCache(), cache.NewNoopCache()) + err = store.AddPeriod(storeCfg, schemaCfg.Configs[0], indexClient, chunksClient, overrides, cache.NewNoopCache(), cache.NewNoopCache()) if err != nil { return nil, err } @@ -145,6 +144,11 @@ func SetupTestChunkStore() (chunk.Store, error) { return store, nil } +func SetupTestChunkStore() (chunk.Store, error) { + storage := chunk.NewMockStorage() + return SetupTestChunkStoreWithClients(storage, storage, storage) +} + func SetupTestObjectStore() (chunk.ObjectClient, error) { return chunk.NewMockStorage(), nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go index 5418ba87fe2e..c52b18c070d1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go @@ -18,6 +18,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/gorilla/mux" amconfig "github.com/prometheus/alertmanager/config" + amtemplate "github.com/prometheus/alertmanager/template" "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/configs/db" @@ -247,7 +248,7 @@ func validateRulesFiles(c userconfig.Config) error { func validateTemplateFiles(c userconfig.Config) error { for fn, content := range c.TemplateFiles { - if _, err := template.New(fn).Parse(content); err != nil { + if _, err := template.New(fn).Funcs(template.FuncMap(amtemplate.DefaultFuncs)).Parse(content); err != nil { return err } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go index 0b61f4feaa3a..4e67b46a94e8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go @@ -10,9 +10,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/opentracing/opentracing-go" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/tracing" "github.com/weaveworks/common/server" "github.com/weaveworks/common/signals" "google.golang.org/grpc/health/grpc_health_v1" @@ -110,7 +108,7 @@ type Config struct { 
func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Server.MetricsNamespace = "cortex" c.Server.ExcludeRequestInLog = true - f.StringVar(&c.Target, "target", All, "The Cortex service to run. Use \"-modules\" command line flag to get a list of available options.") + f.StringVar(&c.Target, "target", All, "The Cortex module to run. Use \"-modules\" command line flag to get a list of available modules, and to see which modules are included in \"All\".") f.BoolVar(&c.ListModules, "modules", false, "List available values to be used as target. Cannot be used in YAML config.") f.BoolVar(&c.AuthEnabled, "auth.enabled", true, "Set to false to disable auth.") f.BoolVar(&c.PrintConfig, "print.config", false, "Print the config and exit.") @@ -272,13 +270,8 @@ func New(cfg Config) (*Cortex, error) { // setupThanosTracing appends a gRPC middleware used to inject our tracer into the custom // context used by Thanos, in order to get Thanos spans correctly attached to our traces. func (t *Cortex) setupThanosTracing() { - t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, - tracing.UnaryServerInterceptor(opentracing.GlobalTracer()), - ) - - t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, - tracing.StreamServerInterceptor(opentracing.GlobalTracer()), - ) + t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, ThanosTracerUnaryInterceptor) + t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, ThanosTracerStreamInterceptor) } // Run starts Cortex running, and blocks until a Cortex stops. diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go index f833fc889976..51a3576dda65 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go @@ -35,6 +35,7 @@ import ( "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/modules" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" @@ -49,7 +50,9 @@ const ( Overrides string = "overrides" Server string = "server" Distributor string = "distributor" + DistributorService string = "distributor-service" Ingester string = "ingester" + IngesterService string = "ingester-service" Flusher string = "flusher" Querier string = "querier" StoreQueryable string = "store-queryable" @@ -79,7 +82,7 @@ func (t *Cortex) initAPI() (services.Service, error) { t.API = a - t.API.RegisterAPI(t.Cfg) + t.API.RegisterAPI(t.Cfg.Server.PathPrefix, t.Cfg) return nil, nil } @@ -124,6 +127,19 @@ func (t *Cortex) initRing() (serv services.Service, err error) { } func (t *Cortex) initRuntimeConfig() (services.Service, error) { + // We need to modify LimitsConfig before calling SetDefaultLimitsForYAMLUnmarshalling later in this method + // but also if runtime-config is not used, for setting limits used by initOverrides. + // TODO: Remove this in Cortex 1.6. + if t.Cfg.Ruler.EvaluationDelay != 0 && t.Cfg.LimitsConfig.RulerEvaluationDelay == 0 { + t.Cfg.LimitsConfig.RulerEvaluationDelay = t.Cfg.Ruler.EvaluationDelay + + // No need to report if this field isn't going to be used.
+ if t.Cfg.Target == All || t.Cfg.Target == Ruler { + flagext.DeprecatedFlagsUsed.Inc() + level.Warn(util.Logger).Log("msg", "Using DEPRECATED YAML config field ruler.evaluation_delay_duration, please use limits.ruler_evaluation_delay_duration instead.") + } + } + if t.Cfg.RuntimeConfig.LoadPath == "" { t.Cfg.RuntimeConfig.LoadPath = t.Cfg.LimitsConfig.PerTenantOverrideConfig t.Cfg.RuntimeConfig.ReloadPeriod = t.Cfg.LimitsConfig.PerTenantOverridePeriod @@ -150,7 +166,7 @@ func (t *Cortex) initOverrides() (serv services.Service, err error) { return nil, err } -func (t *Cortex) initDistributor() (serv services.Service, err error) { +func (t *Cortex) initDistributorService() (serv services.Service, err error) { t.Cfg.Distributor.DistributorRing.ListenPort = t.Cfg.Server.GRPCListenPort // Check whether the distributor can join the distributors ring, which is @@ -163,9 +179,13 @@ func (t *Cortex) initDistributor() (serv services.Service, err error) { return } + return t.Distributor, nil +} + +func (t *Cortex) initDistributor() (serv services.Service, err error) { t.API.RegisterDistributor(t.Distributor, t.Cfg.Distributor) - return t.Distributor, nil + return nil, nil } func (t *Cortex) initQuerier() (serv services.Service, err error) { @@ -293,7 +313,7 @@ func (t *Cortex) tsdbIngesterConfig() { t.Cfg.Ingester.BlocksStorageConfig = t.Cfg.BlocksStorage } -func (t *Cortex) initIngester() (serv services.Service, err error) { +func (t *Cortex) initIngesterService() (serv services.Service, err error) { t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.RuntimeConfig) t.Cfg.Ingester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort t.Cfg.Ingester.ShardByAllLabels = t.Cfg.Distributor.ShardByAllLabels @@ -304,9 +324,13 @@ func (t *Cortex) initIngester() (serv services.Service, err error) { return } + return t.Ingester, nil +} + +func (t *Cortex) initIngester() (serv services.Service, err error) { t.API.RegisterIngester(t.Ingester, t.Cfg.Distributor) - return t.Ingester, nil + return nil, nil } func (t *Cortex) initFlusher() (serv services.Service, err error) { @@ -316,6 +340,7 @@ func (t *Cortex) initFlusher() (serv services.Service, err error) { t.Cfg.Flusher, t.Cfg.Ingester, t.Store, + t.Overrides, prometheus.DefaultRegisterer, ) if err != nil { @@ -504,7 +529,7 @@ func (t *Cortex) initRuler() (serv services.Service, err error) { rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer) queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, rulerRegisterer) - managerFactory := ruler.DefaultTenantManagerFactory(t.Cfg.Ruler, t.Distributor, queryable, engine) + managerFactory := ruler.DefaultTenantManagerFactory(t.Cfg.Ruler, t.Distributor, queryable, engine, t.Overrides) manager, err := ruler.NewDefaultMultiTenantManager(t.Cfg.Ruler, managerFactory, prometheus.DefaultRegisterer, util.Logger) if err != nil { return nil, err @@ -587,7 +612,7 @@ func (t *Cortex) initMemberlistKV() (services.Service, error) { t.Cfg.MemberlistKV.Codecs = []codec.Codec{ ring.GetCodec(), } - t.MemberlistKV = memberlist.NewKVInitService(&t.Cfg.MemberlistKV) + t.MemberlistKV = memberlist.NewKVInitService(&t.Cfg.MemberlistKV, util.Logger) // Update the config. 
t.Cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV @@ -631,9 +656,11 @@ func (t *Cortex) setupModuleManager() error { mm.RegisterModule(Ring, t.initRing, modules.UserInvisibleModule) mm.RegisterModule(Overrides, t.initOverrides, modules.UserInvisibleModule) mm.RegisterModule(Distributor, t.initDistributor) + mm.RegisterModule(DistributorService, t.initDistributorService, modules.UserInvisibleModule) mm.RegisterModule(Store, t.initChunkStore, modules.UserInvisibleModule) mm.RegisterModule(DeleteRequestsStore, t.initDeleteRequestsStore, modules.UserInvisibleModule) mm.RegisterModule(Ingester, t.initIngester) + mm.RegisterModule(IngesterService, t.initIngesterService, modules.UserInvisibleModule) mm.RegisterModule(Flusher, t.initFlusher) mm.RegisterModule(Querier, t.initQuerier) mm.RegisterModule(StoreQueryable, t.initStoreQueryables, modules.UserInvisibleModule) @@ -650,24 +677,26 @@ func (t *Cortex) setupModuleManager() error { // Add dependencies deps := map[string][]string{ - API: {Server}, - Ring: {API, RuntimeConfig, MemberlistKV}, - Overrides: {RuntimeConfig}, - Distributor: {Ring, API, Overrides}, - Store: {Overrides, DeleteRequestsStore}, - Ingester: {Overrides, Store, API, RuntimeConfig, MemberlistKV}, - Flusher: {Store, API}, - Querier: {Overrides, Distributor, Store, Ring, API, StoreQueryable, MemberlistKV}, - StoreQueryable: {Overrides, Store}, - QueryFrontend: {API, Overrides, DeleteRequestsStore}, - TableManager: {API}, - Ruler: {Overrides, Distributor, Store, StoreQueryable, RulerStorage}, - Configs: {API}, - AlertManager: {API}, - Compactor: {API, MemberlistKV}, - StoreGateway: {API, Overrides, MemberlistKV}, - Purger: {Store, DeleteRequestsStore, API}, - All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler}, + API: {Server}, + Ring: {API, RuntimeConfig, MemberlistKV}, + Overrides: {RuntimeConfig}, + Distributor: {DistributorService, API}, + DistributorService: {Ring, Overrides}, + Store: {Overrides, DeleteRequestsStore}, + Ingester: {IngesterService, API}, + IngesterService: {Overrides, Store, RuntimeConfig, MemberlistKV}, + Flusher: {Store, API}, + Querier: {Overrides, DistributorService, Store, Ring, API, StoreQueryable, MemberlistKV}, + StoreQueryable: {Overrides, Store, MemberlistKV}, + QueryFrontend: {API, Overrides, DeleteRequestsStore}, + TableManager: {API}, + Ruler: {Overrides, DistributorService, Store, StoreQueryable, RulerStorage}, + Configs: {API}, + AlertManager: {API}, + Compactor: {API, MemberlistKV}, + StoreGateway: {API, Overrides, MemberlistKV}, + Purger: {Store, DeleteRequestsStore, API}, + All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler}, } for mod, targets := range deps { if err := mm.AddDependency(mod, targets...); err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/tracing.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/tracing.go new file mode 100644 index 000000000000..daa05e154d3d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/tracing.go @@ -0,0 +1,33 @@ +package cortex + +import ( + "context" + + "github.com/opentracing/opentracing-go" + "github.com/thanos-io/thanos/pkg/tracing" + "google.golang.org/grpc" +) + +// ThanosTracerUnaryInterceptor injects the opentracing global tracer into the context +// in order to get it picked up by Thanos components. 
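The two interceptors declared in the new tracing.go file (their bodies follow just below) replace Thanos's own middleware: each one stashes the global OpenTracing tracer into the request context so Thanos components pick it up. Cortex appends them to the weaveworks server middleware as shown above; a sketch of wiring them into a plain gRPC server by hand, with hypothetical server setup:

```go
package main

import (
	"google.golang.org/grpc"

	"github.com/cortexproject/cortex/pkg/cortex"
)

func main() {
	// Install the tracer-injecting interceptors for unary and streaming RPCs;
	// every handler invoked by this server then sees a context that carries
	// the global OpenTracing tracer, which Thanos code looks up.
	srv := grpc.NewServer(
		grpc.UnaryInterceptor(cortex.ThanosTracerUnaryInterceptor),
		grpc.StreamInterceptor(cortex.ThanosTracerStreamInterceptor),
	)
	_ = srv // register services and call srv.Serve(listener) as usual
}
```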
+func ThanosTracerUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return handler(tracing.ContextWithTracer(ctx, opentracing.GlobalTracer()), req) +} + +// ThanosTracerStreamInterceptor injects the opentracing global tracer into the context +// in order to get it picked up by Thanos components. +func ThanosTracerStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return handler(srv, wrappedServerStream{ + ctx: tracing.ContextWithTracer(ss.Context(), opentracing.GlobalTracer()), + ServerStream: ss, + }) +} + +type wrappedServerStream struct { + ctx context.Context + grpc.ServerStream +} + +func (ss wrappedServerStream) Context() context.Context { + return ss.ctx +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go index 0f315479b3ac..20085123f78b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -152,8 +152,8 @@ type Config struct { // Distributors ring DistributorRing RingConfig `yaml:"ring"` - // for testing - ingesterClientFactory ring_client.PoolFactory `yaml:"-"` + // for testing and for extending the ingester by adding calls to the client + IngesterClientFactory ring_client.PoolFactory `yaml:"-"` // when true the distributor does not validate the label name, Cortex doesn't directly use // this (and should never use it) but this feature is used by other projects built on top of it @@ -179,8 +179,8 @@ func (cfg *Config) Validate() error { // New constructs a new Distributor func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Overrides, ingestersRing ring.ReadRing, canJoinDistributorsRing bool, reg prometheus.Registerer) (*Distributor, error) { - if cfg.ingesterClientFactory == nil { - cfg.ingesterClientFactory = func(addr string) (ring_client.PoolClient, error) { + if cfg.IngesterClientFactory == nil { + cfg.IngesterClientFactory = func(addr string) (ring_client.PoolClient, error) { return ingester_client.MakeIngesterClient(addr, clientConfig) } } @@ -220,7 +220,7 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove d := &Distributor{ cfg: cfg, ingestersRing: ingestersRing, - ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.ingesterClientFactory, util.Logger), + ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.IngesterClientFactory, util.Logger), distributorsRing: distributorsRing, limits: limits, ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), @@ -603,8 +603,8 @@ func (d *Distributor) send(ctx context.Context, ingester ring.IngesterDesc, time return err } -// forAllIngesters runs f, in parallel, for all ingesters -func (d *Distributor) forAllIngesters(ctx context.Context, reallyAll bool, f func(client.IngesterClient) (interface{}, error)) ([]interface{}, error) { +// ForAllIngesters runs f, in parallel, for all ingesters +func (d *Distributor) ForAllIngesters(ctx context.Context, reallyAll bool, f func(client.IngesterClient) (interface{}, error)) ([]interface{}, error) { replicationSet, err := d.ingestersRing.GetAll(ring.Read) if err != nil { return nil, err @@ -628,7 +628,7 @@ func (d *Distributor) LabelValuesForLabelName(ctx context.Context, labelName mod req := &client.LabelValuesRequest{ LabelName: 
string(labelName), } - resps, err := d.forAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { + resps, err := d.ForAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { return client.LabelValues(ctx, req) }) if err != nil { @@ -652,7 +652,7 @@ func (d *Distributor) LabelValuesForLabelName(ctx context.Context, labelName mod // LabelNames returns all of the label names. func (d *Distributor) LabelNames(ctx context.Context) ([]string, error) { req := &client.LabelNamesRequest{} - resps, err := d.forAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { + resps, err := d.ForAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { return client.LabelNames(ctx, req) }) if err != nil { @@ -684,7 +684,7 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through return nil, err } - resps, err := d.forAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { + resps, err := d.ForAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { return client.MetricsForLabelMatchers(ctx, req) }) if err != nil { @@ -712,7 +712,7 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) { req := &ingester_client.MetricsMetadataRequest{} // TODO(gotjosh): We only need to look in all the ingesters if shardByAllLabels is enabled. - resps, err := d.forAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { + resps, err := d.ForAllIngesters(ctx, false, func(client client.IngesterClient) (interface{}, error) { return client.MetricsMetadata(ctx, req) }) if err != nil { @@ -746,7 +746,7 @@ func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetad // UserStats returns statistics about the current user. func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) { req := &client.UserStatsRequest{} - resps, err := d.forAllIngesters(ctx, true, func(client client.IngesterClient) (interface{}, error) { + resps, err := d.ForAllIngesters(ctx, true, func(client client.IngesterClient) (interface{}, error) { return client.UserStats(ctx, req) }) if err != nil { @@ -782,7 +782,7 @@ func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) { req := &client.UserStatsRequest{} ctx = user.InjectOrgID(ctx, "1") // fake: ingester insists on having an org ID - // Not using d.forAllIngesters(), so we can fail after first error. + // Not using d.ForAllIngesters(), so we can fail after first error. replicationSet, err := d.ingestersRing.GetAll(ring.Read) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go index 9f96d2129561..6157599a8758 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go +++ b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go @@ -12,6 +12,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/validation" ) // Config for an Ingester. 
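Renaming forAllIngesters to ForAllIngesters in distributor.go above makes the fan-out helper available outside the package. A hedged sketch of an external caller; the wrapper function is hypothetical:

```go
package example

import (
	"context"

	"github.com/cortexproject/cortex/pkg/distributor"
	"github.com/cortexproject/cortex/pkg/ingester/client"
)

// userStats fans a UserStats RPC out to every ingester in parallel; with
// reallyAll=true the call also includes ingesters leaving the ring, matching
// how Distributor.UserStats uses it above.
func userStats(ctx context.Context, d *distributor.Distributor) ([]interface{}, error) {
	req := &client.UserStatsRequest{}
	return d.ForAllIngesters(ctx, true, func(c client.IngesterClient) (interface{}, error) {
		return c.UserStats(ctx, req)
	})
}
```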
@@ -38,6 +39,7 @@ type Flusher struct { cfg Config ingesterConfig ingester.Config chunkStore ingester.ChunkStore + limits *validation.Overrides registerer prometheus.Registerer } @@ -51,6 +53,7 @@ func New( cfg Config, ingesterConfig ingester.Config, chunkStore ingester.ChunkStore, + limits *validation.Overrides, registerer prometheus.Registerer, ) (*Flusher, error) { @@ -63,6 +66,7 @@ func New( cfg: cfg, ingesterConfig: ingesterConfig, chunkStore: chunkStore, + limits: limits, registerer: registerer, } f.Service = services.NewBasicService(nil, f.running, nil) @@ -70,7 +74,7 @@ func New( } func (f *Flusher) running(ctx context.Context) error { - ing, err := ingester.NewForFlusher(f.ingesterConfig, f.chunkStore, f.registerer) + ing, err := ingester.NewForFlusher(f.ingesterConfig, f.chunkStore, f.limits, f.registerer) if err != nil { return errors.Wrap(err, "create ingester") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go index 19eade8d6d78..5a682f60de49 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go @@ -262,7 +262,7 @@ func (i *Ingester) startFlushLoops() { // Compared to the 'New' method: // * Always replays the WAL. // * Does not start the lifecycler. -func NewForFlusher(cfg Config, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) { +func NewForFlusher(cfg Config, chunkStore ChunkStore, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) { if cfg.BlocksStorageEnabled { return NewV2ForFlusher(cfg, registerer) } @@ -273,9 +273,10 @@ func NewForFlusher(cfg Config, chunkStore ChunkStore, registerer prometheus.Regi chunkStore: chunkStore, flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), wal: &noopWAL{}, + limits: limits, } - i.BasicService = services.NewBasicService(i.startingForFlusher, i.loop, i.stopping) + i.BasicService = services.NewBasicService(i.startingForFlusher, i.loopForFlusher, i.stopping) return i, nil } @@ -297,6 +298,18 @@ func (i *Ingester) startingForFlusher(ctx context.Context) error { return nil } +func (i *Ingester) loopForFlusher(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return nil + + case err := <-i.subservicesWatcher.Chan(): + return errors.Wrap(err, "ingester subservice failed") + } + } +} + func (i *Ingester) loop(ctx context.Context) error { flushTicker := time.NewTicker(i.cfg.FlushCheckPeriod) defer flushTicker.Stop() diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go index c422cca8fd57..52a7a223481f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go @@ -184,7 +184,6 @@ func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer // NewV2 returns a new Ingester that uses Cortex block storage instead of chunks storage. 
func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) { - util.WarnExperimentalUse("Blocks storage engine") bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", util.Logger, registerer) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") } @@ -231,7 +230,6 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, // Special version of ingester used by Flusher. This ingester is not ingesting anything, its only purpose is to react // to the Flush method and flush all opened TSDBs when called. func NewV2ForFlusher(cfg Config, registerer prometheus.Registerer) (*Ingester, error) { - util.WarnExperimentalUse("Blocks storage engine") bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", util.Logger, registerer) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go index ee009e3b11b9..22ebaa285797 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go @@ -126,8 +126,6 @@ type BlocksStoreQueryable struct { } func NewBlocksStoreQueryable(stores BlocksStoreSet, finder BlocksFinder, consistency *BlocksConsistencyChecker, limits BlocksStoreLimits, queryStoreAfter time.Duration, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { - util.WarnExperimentalUse("Blocks storage engine") - manager, err := services.NewManager(stores, finder) if err != nil { return nil, errors.Wrap(err, "register blocks storage queryable subservices") diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go index 04d266a39f96..5bd089fa4110 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go @@ -57,8 +57,6 @@ type Config struct { // LookbackDelta determines the time since the last sample after which a time // series is considered stale. LookbackDelta time.Duration `yaml:"lookback_delta"` - // This is used for the deprecated flag -promql.lookback-delta. - legacyLookbackDelta time.Duration // Blocks storage only. StoreGatewayAddresses string `yaml:"store_gateway_addresses"` @@ -72,13 +70,9 @@ var ( errBadLookbackConfigs = errors.New("bad settings, query_store_after >= query_ingesters_within which can result in queries not being sent") ) -const ( - defaultLookbackDelta = 5 * time.Minute -) - // RegisterFlags adds the flags required to config this to the given FlagSet.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.StoreGatewayClient.RegisterFlagsWithPrefix("experimental.querier.store-gateway-client", f) + cfg.StoreGatewayClient.RegisterFlagsWithPrefix("querier.store-gateway-client", f) f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, "The maximum number of concurrent queries.") f.DurationVar(&cfg.Timeout, "querier.timeout", 2*time.Minute, "The timeout for a query.") f.BoolVar(&cfg.Iterators, "querier.iterators", false, "Use iterators to execute query, as opposed to fully materialising the series in memory.") @@ -88,12 +82,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") - f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should only be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the experimental blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") + f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should only be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") f.StringVar(&cfg.ActiveQueryTrackerDir, "querier.active-query-tracker-dir", "./active-query-tracker", "Active query tracker monitors active queries, and writes them to the file in given directory. If Cortex discovers any queries in this log during startup, it will log them to the log file. Setting to empty value disables active query tracker, which also disables -querier.max-concurrent option.") - f.StringVar(&cfg.StoreGatewayAddresses, "experimental.querier.store-gateway-addresses", "", "Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should be set when using the experimental blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).") - f.DurationVar(&cfg.LookbackDelta, "querier.lookback-delta", defaultLookbackDelta, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.") - // TODO: Remove this flag in v1.4.0. - f.DurationVar(&cfg.legacyLookbackDelta, "promql.lookback-delta", defaultLookbackDelta, "[DEPRECATED] Time since the last sample after which a time series is considered stale and ignored by expression evaluations. Please use -querier.lookback-delta instead.") + f.StringVar(&cfg.StoreGatewayAddresses, "querier.store-gateway-addresses", "", "Comma separated list of store-gateway addresses in DNS Service Discovery format. 
This option should be set when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).") + f.DurationVar(&cfg.LookbackDelta, "querier.lookback-delta", 5*time.Minute, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.") f.StringVar(&cfg.SecondStoreEngine, "querier.second-store-engine", "", "Second store engine to use for querying. Empty = disabled.") f.Var(&cfg.UseSecondStoreBeforeTime, "querier.use-second-store-before-time", "If specified, second store is only used for queries before this timestamp. Default value 0 means secondary store is always queried.") } @@ -157,23 +149,13 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor return lazyquery.NewLazyQuerier(querier), nil }) - lookbackDelta := cfg.LookbackDelta - if cfg.LookbackDelta == defaultLookbackDelta && cfg.legacyLookbackDelta != defaultLookbackDelta { - // If the old flag was set to some other value than the default, it means - // the old flag was used and not the new flag. - lookbackDelta = cfg.legacyLookbackDelta - - flagext.DeprecatedFlagsUsed.Inc() - level.Warn(util.Logger).Log("msg", "Using deprecated flag -promql.lookback-delta, use -querier.lookback-delta instead") - } - engine := promql.NewEngine(promql.EngineOpts{ Logger: util.Logger, Reg: reg, ActiveQueryTracker: createActiveQueryTracker(cfg), MaxSamples: cfg.MaxSamples, Timeout: cfg.Timeout, - LookbackDelta: lookbackDelta, + LookbackDelta: cfg.LookbackDelta, NoStepSubqueryIntervalFn: func(int64) int64 { return cfg.DefaultEvaluationInterval.Milliseconds() }, @@ -303,7 +285,9 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat startTime := model.Time(sp.Start) endTime := model.Time(sp.End) if maxQueryLength := q.limits.MaxQueryLength(userID); maxQueryLength > 0 && endTime.Sub(startTime) > maxQueryLength { - return storage.ErrSeriesSet(fmt.Errorf(validation.ErrQueryTooLong, endTime.Sub(startTime), maxQueryLength)) + limitErr := validation.LimitError(fmt.Sprintf(validation.ErrQueryTooLong, endTime.Sub(startTime), maxQueryLength)) + return storage.ErrSeriesSet(limitErr) + } tombstones, err := q.tombstonesLoader.GetPendingTombstonesForInterval(userID, startTime, endTime) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go index f61a0be6ac95..5958bb7f78ee 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go @@ -40,8 +40,7 @@ type CacheGenNumberLoader interface { // ResultsCacheConfig is the config for the results cache. type ResultsCacheConfig struct { - CacheConfig cache.Config `yaml:"cache"` - LegacyMaxCacheFreshness time.Duration `yaml:"max_freshness" doc:"hidden"` // TODO: (deprecated) remove in Cortex v1.4.0 + CacheConfig cache.Config `yaml:"cache"` } // RegisterFlags registers flags. 
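With LegacyMaxCacheFreshness removed, the results cache takes freshness exclusively from the per-tenant limits, as the Do hunk below shows. A small sketch of that gate; the 10-minute freshness stands in for an assumed per-tenant limit and is not a default taken from this patch:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

// shouldBypassCache mirrors the freshness check in resultsCache.Do: requests
// whose start time lies inside the "too fresh" window skip the cache and go
// straight to the next handler.
func shouldBypassCache(reqStartMs int64, maxCacheFreshness time.Duration) bool {
	maxCacheTime := int64(model.Now().Add(-maxCacheFreshness))
	return reqStartMs > maxCacheTime
}

func main() {
	now := int64(model.Now())
	fmt.Println(shouldBypassCache(now, 10*time.Minute)) // true: result not cached
}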
@@ -182,11 +181,7 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { response Response ) - // check if cache freshness value is provided in legacy config - maxCacheFreshness := s.cfg.LegacyMaxCacheFreshness - if maxCacheFreshness == time.Duration(0) { - maxCacheFreshness = s.limits.MaxCacheFreshness(userID) - } + maxCacheFreshness := s.limits.MaxCacheFreshness(userID) maxCacheTime := int64(model.Now().Add(-maxCacheFreshness)) if r.GetStart() > maxCacheTime { return s.next.Do(ctx, r) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go index 65caeb6750c0..de0acddb0d65 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go @@ -4,6 +4,8 @@ import ( "context" "sync" + "github.com/go-kit/kit/log" + "github.com/cortexproject/cortex/pkg/util/services" ) @@ -13,7 +15,8 @@ type KVInitService struct { services.Service // config used for initialization - cfg *KVConfig + cfg *KVConfig + logger log.Logger // init function, to avoid multiple initializations. init sync.Once @@ -24,10 +27,11 @@ type KVInitService struct { watcher *services.FailureWatcher } -func NewKVInitService(cfg *KVConfig) *KVInitService { +func NewKVInitService(cfg *KVConfig, logger log.Logger) *KVInitService { kvinit := &KVInitService{ cfg: cfg, watcher: services.NewFailureWatcher(), + logger: logger, } kvinit.Service = services.NewBasicService(nil, kvinit.running, kvinit.stopping) return kvinit @@ -36,7 +40,7 @@ func NewKVInitService(cfg *KVConfig) *KVInitService { // This method will initialize Memberlist.KV on first call, and add it to service failure watcher. func (kvs *KVInitService) GetMemberlistKV() (*KV, error) { kvs.init.Do(func() { - kvs.kv = NewKV(*kvs.cfg) + kvs.kv = NewKV(*kvs.cfg, kvs.logger) kvs.watcher.WatchService(kvs.kv) kvs.err = kvs.kv.StartAsync(context.Background()) }) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go index 04e5497a7dad..f1b61ee3c797 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go @@ -13,6 +13,7 @@ import ( "sync" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/hashicorp/memberlist" "github.com/prometheus/client_golang/prometheus" @@ -201,7 +202,8 @@ func generateRandomSuffix() string { type KV struct { services.Service - cfg KVConfig + cfg KVConfig + logger log.Logger // dns discovery provider provider *dns.Provider @@ -276,7 +278,7 @@ var ( // gossiping part. Only after service is in Running state, it is really gossiping. Starting the service will also // trigger connecting to the existing memberlist cluster. If that fails and AbortIfJoinFails is true, error is returned // and service enters Failed state. 
-func NewKV(cfg KVConfig) *KV { +func NewKV(cfg KVConfig, logger log.Logger) *KV { cfg.TCPTransport.MetricsRegisterer = cfg.MetricsRegisterer cfg.TCPTransport.MetricsNamespace = cfg.MetricsNamespace @@ -289,7 +291,8 @@ func NewKV(cfg KVConfig) *KV { mlkv := &KV{ cfg: cfg, - provider: dns.NewProvider(util.Logger, mr, dns.GolangResolverType), + logger: logger, + provider: dns.NewProvider(logger, mr, dns.GolangResolverType), store: make(map[string]valueDesc), codecs: make(map[string]codec.Codec), watchers: make(map[string][]chan string), @@ -309,7 +312,7 @@ func NewKV(cfg KVConfig) *KV { } func (m *KV) buildMemberlistConfig() (*memberlist.Config, error) { - tr, err := NewTCPTransport(m.cfg.TCPTransport) + tr, err := NewTCPTransport(m.cfg.TCPTransport, m.logger) if err != nil { return nil, fmt.Errorf("failed to create transport: %v", err) } @@ -343,10 +346,10 @@ func (m *KV) buildMemberlistConfig() (*memberlist.Config, error) { } if m.cfg.RandomizeNodeName { mlCfg.Name = mlCfg.Name + "-" + generateRandomSuffix() - level.Info(util.Logger).Log("msg", "Using memberlist cluster node name", "name", mlCfg.Name) + level.Info(m.logger).Log("msg", "Using memberlist cluster node name", "name", mlCfg.Name) } - mlCfg.LogOutput = newMemberlistLoggerAdapter(util.Logger, false) + mlCfg.LogOutput = newMemberlistLoggerAdapter(m.logger, false) mlCfg.Transport = tr // Memberlist uses UDPBufferSize to figure out how many messages it can put into single "packet". @@ -398,7 +401,7 @@ func (m *KV) running(ctx context.Context) error { err := m.joinMembersOnStartup(ctx, members) if err != nil { - level.Error(util.Logger).Log("msg", "failed to join memberlist cluster", "err", err) + level.Error(m.logger).Log("msg", "failed to join memberlist cluster", "err", err) if m.cfg.AbortIfJoinFails { return errFailedToJoinCluster @@ -421,10 +424,10 @@ func (m *KV) running(ctx context.Context) error { reached, err := m.memberlist.Join(members) if err == nil { - level.Info(util.Logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached) + level.Info(m.logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached) } else { // Don't report error from rejoin, otherwise KV service would be stopped completely. 
- level.Warn(util.Logger).Log("msg", "re-joining memberlist cluster failed", "err", err) + level.Warn(m.logger).Log("msg", "re-joining memberlist cluster failed", "err", err) } case <-ctx.Done(): @@ -457,7 +460,7 @@ func (m *KV) JoinMembers(members []string) (int, error) { func (m *KV) joinMembersOnStartup(ctx context.Context, members []string) error { reached, err := m.memberlist.Join(m.cfg.JoinMembers) if err == nil { - level.Info(util.Logger).Log("msg", "joined memberlist cluster", "reached_nodes", reached) + level.Info(m.logger).Log("msg", "joined memberlist cluster", "reached_nodes", reached) return nil } @@ -465,7 +468,7 @@ func (m *KV) joinMembersOnStartup(ctx context.Context, members []string) error { return err } - level.Debug(util.Logger).Log("msg", "attempt to join memberlist cluster failed", "retries", 0, "err", err) + level.Debug(m.logger).Log("msg", "attempt to join memberlist cluster failed", "retries", 0, "err", err) lastErr := err cfg := util.BackoffConfig{ @@ -482,11 +485,11 @@ func (m *KV) joinMembersOnStartup(ctx context.Context, members []string) error { reached, err := m.memberlist.Join(members) if err != nil { lastErr = err - level.Debug(util.Logger).Log("msg", "attempt to join memberlist cluster failed", "retries", backoff.NumRetries(), "err", err) + level.Debug(m.logger).Log("msg", "attempt to join memberlist cluster failed", "retries", backoff.NumRetries(), "err", err) continue } - level.Info(util.Logger).Log("msg", "joined memberlist cluster", "reached_nodes", reached) + level.Info(m.logger).Log("msg", "joined memberlist cluster", "reached_nodes", reached) return nil } @@ -512,7 +515,7 @@ func (m *KV) discoverMembers(ctx context.Context, members []string) []string { err := m.provider.Resolve(ctx, resolve) if err != nil { - level.Error(util.Logger).Log("msg", "failed to resolve members", "addrs", strings.Join(resolve, ",")) + level.Error(m.logger).Log("msg", "failed to resolve members", "addrs", strings.Join(resolve, ",")) } ms = append(ms, m.provider.Addresses()...) @@ -523,7 +526,7 @@ func (m *KV) discoverMembers(ctx context.Context, members []string) []string { // While Stopping, we try to leave memberlist cluster and then shutdown memberlist client. // We do this in order to send out last messages, typically that ingester has LEFT the ring. func (m *KV) stopping(_ error) error { - level.Info(util.Logger).Log("msg", "leaving memberlist cluster") + level.Info(m.logger).Log("msg", "leaving memberlist cluster") // Wait until broadcast queue is empty, but don't wait for too long. // Also don't wait if there is just one node left. 
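The memberlist changes in this file thread a caller-supplied go-kit logger through KV and its transport instead of reading the package-global util.Logger. A sketch of constructing the lazy init service with an explicit logger, assuming a zero-value KVConfig is acceptable for illustration:

package main

import (
	"os"

	"github.com/go-kit/kit/log"

	"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
)

func main() {
	// The logger is injected at construction time; log lines emitted by the
	// KV store and its TCP transport now go through it.
	logger := log.NewLogfmtLogger(os.Stderr)

	var cfg memberlist.KVConfig
	kvinit := memberlist.NewKVInitService(&cfg, logger)
	_ = kvinit
}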
@@ -538,19 +541,19 @@ func (m *KV) stopping(_ error) error { } if cnt := m.broadcasts.NumQueued(); cnt > 0 { - level.Warn(util.Logger).Log("msg", "broadcast messages left in queue", "count", cnt, "nodes", m.memberlist.NumMembers()) + level.Warn(m.logger).Log("msg", "broadcast messages left in queue", "count", cnt, "nodes", m.memberlist.NumMembers()) } err := m.memberlist.Leave(m.cfg.LeaveTimeout) if err != nil { - level.Error(util.Logger).Log("msg", "error when leaving memberlist cluster", "err", err) + level.Error(m.logger).Log("msg", "error when leaving memberlist cluster", "err", err) } close(m.shutdown) err = m.memberlist.Shutdown() if err != nil { - level.Error(util.Logger).Log("msg", "error when shutting down memberlist client", "err", err) + level.Error(m.logger).Log("msg", "error when shutting down memberlist client", "err", err) } return nil } @@ -627,7 +630,7 @@ func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func // value changed val, _, err := m.get(key, codec) if err != nil { - level.Warn(util.Logger).Log("msg", "failed to decode value while watching for changes", "key", key, "err", err) + level.Warn(m.logger).Log("msg", "failed to decode value while watching for changes", "key", key, "err", err) continue } @@ -673,7 +676,7 @@ func (m *KV) WatchPrefix(ctx context.Context, prefix string, codec codec.Codec, case key := <-w: val, _, err := m.get(key, codec) if err != nil { - level.Warn(util.Logger).Log("msg", "failed to decode value while watching for changes", "key", key, "err", err) + level.Warn(m.logger).Log("msg", "failed to decode value while watching for changes", "key", key, "err", err) continue } @@ -734,7 +737,7 @@ func (m *KV) notifyWatchers(key string) { c.Inc() } - level.Warn(util.Logger).Log("msg", "failed to send notification to prefix watcher", "prefix", p) + level.Warn(m.logger).Log("msg", "failed to send notification to prefix watcher", "prefix", p) } } } @@ -773,7 +776,7 @@ outer: change, newver, retry, err := m.trySingleCas(key, codec, f) if err != nil { - level.Debug(util.Logger).Log("msg", "CAS attempt failed", "err", err, "retry", retry) + level.Debug(m.logger).Log("msg", "CAS attempt failed", "err", err, "retry", retry) lastError = err if !retry { @@ -789,7 +792,7 @@ outer: if m.State() == services.Running { m.broadcastNewValue(key, change, newver, codec) } else { - level.Warn(util.Logger).Log("msg", "skipped broadcasting CAS update because memberlist KV is shutting down", "key", key) + level.Warn(m.logger).Log("msg", "skipped broadcasting CAS update because memberlist KV is shutting down", "key", key) } } @@ -851,14 +854,14 @@ func (m *KV) trySingleCas(key string, codec codec.Codec, f func(in interface{}) func (m *KV) broadcastNewValue(key string, change Mergeable, version uint, codec codec.Codec) { data, err := codec.Encode(change) if err != nil { - level.Error(util.Logger).Log("msg", "failed to encode change", "err", err) + level.Error(m.logger).Log("msg", "failed to encode change", "err", err) return } kvPair := KeyValuePair{Key: key, Value: data, Codec: codec.CodecID()} pairData, err := kvPair.Marshal() if err != nil { - level.Error(util.Logger).Log("msg", "failed to serialize KV pair", "err", err) + level.Error(m.logger).Log("msg", "failed to serialize KV pair", "err", err) } if len(pairData) > 65535 { @@ -868,7 +871,7 @@ func (m *KV) broadcastNewValue(key string, change Mergeable, version uint, codec // // Typically messages are smaller (when dealing with couple of updates only), but can get bigger // when broadcasting 
result of push/pull update. - level.Debug(util.Logger).Log("msg", "broadcast message too big, not broadcasting", "len", len(pairData)) + level.Debug(m.logger).Log("msg", "broadcast message too big, not broadcasting", "len", len(pairData)) return } @@ -893,13 +896,13 @@ func (m *KV) NotifyMsg(msg []byte) { kvPair := KeyValuePair{} err := kvPair.Unmarshal(msg) if err != nil { - level.Warn(util.Logger).Log("msg", "failed to unmarshal received KV Pair", "err", err) + level.Warn(m.logger).Log("msg", "failed to unmarshal received KV Pair", "err", err) m.numberOfInvalidReceivedMessages.Inc() return } if len(kvPair.Key) == 0 { - level.Warn(util.Logger).Log("msg", "received an invalid KV Pair (empty key)") + level.Warn(m.logger).Log("msg", "received an invalid KV Pair (empty key)") m.numberOfInvalidReceivedMessages.Inc() return } @@ -907,14 +910,14 @@ func (m *KV) NotifyMsg(msg []byte) { codec := m.GetCodec(kvPair.GetCodec()) if codec == nil { m.numberOfInvalidReceivedMessages.Inc() - level.Error(util.Logger).Log("msg", "failed to decode received value, unknown codec", "codec", kvPair.GetCodec()) + level.Error(m.logger).Log("msg", "failed to decode received value, unknown codec", "codec", kvPair.GetCodec()) return } // we have a ring update! Let's merge it with our version of the ring for given key mod, version, err := m.mergeBytesValueForKey(kvPair.Key, kvPair.Value, codec) if err != nil { - level.Error(util.Logger).Log("msg", "failed to store received value", "key", kvPair.Key, "err", err) + level.Error(m.logger).Log("msg", "failed to store received value", "key", kvPair.Key, "err", err) } else if version > 0 { m.notifyWatchers(kvPair.Key) @@ -983,18 +986,18 @@ func (m *KV) LocalState(join bool) []byte { ser, err := kvPair.Marshal() if err != nil { - level.Error(util.Logger).Log("msg", "failed to serialize KV Pair", "err", err) + level.Error(m.logger).Log("msg", "failed to serialize KV Pair", "err", err) continue } if uint(len(ser)) > math.MaxUint32 { - level.Error(util.Logger).Log("msg", "value too long", "key", key, "value_length", len(val.value)) + level.Error(m.logger).Log("msg", "value too long", "key", key, "value_length", len(val.value)) continue } err = binary.Write(&buf, binary.BigEndian, uint32(len(ser))) if err != nil { - level.Error(util.Logger).Log("msg", "failed to write uint32 to buffer?", "err", err) + level.Error(m.logger).Log("msg", "failed to write uint32 to buffer?", "err", err) continue } buf.Write(ser) @@ -1046,14 +1049,14 @@ func (m *KV) MergeRemoteState(data []byte, join bool) { codec := m.GetCodec(kvPair.GetCodec()) if codec == nil { - level.Error(util.Logger).Log("msg", "failed to parse remote state: unknown codec for key", "codec", kvPair.GetCodec(), "key", kvPair.GetKey()) + level.Error(m.logger).Log("msg", "failed to parse remote state: unknown codec for key", "codec", kvPair.GetCodec(), "key", kvPair.GetKey()) continue } // we have both key and value, try to merge it with our state change, newver, err := m.mergeBytesValueForKey(kvPair.Key, kvPair.Value, codec) if err != nil { - level.Error(util.Logger).Log("msg", "failed to store received value", "key", kvPair.Key, "err", err) + level.Error(m.logger).Log("msg", "failed to store received value", "key", kvPair.Key, "err", err) } else if newver > 0 { m.notifyWatchers(kvPair.Key) m.broadcastNewValue(kvPair.Key, change, newver, codec) @@ -1061,7 +1064,7 @@ func (m *KV) MergeRemoteState(data []byte, join bool) { } if err != nil { - level.Error(util.Logger).Log("msg", "failed to parse remote state", "err", err) + 
level.Error(m.logger).Log("msg", "failed to parse remote state", "err", err) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go index d6af8b2ec990..13f3f993e61f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go @@ -18,7 +18,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -74,6 +73,7 @@ func (cfg *TCPTransportConfig) RegisterFlags(f *flag.FlagSet, prefix string) { // It uses a new TCP connections for each operation. There is no connection reuse. type TCPTransport struct { cfg TCPTransportConfig + logger log.Logger packetCh chan *memberlist.Packet connCh chan net.Conn wg sync.WaitGroup @@ -95,13 +95,12 @@ type TCPTransport struct { sentPackets prometheus.Counter sentPacketsBytes prometheus.Counter sentPacketsErrors prometheus.Counter - - unknownConnections prometheus.Counter + unknownConnections prometheus.Counter } // NewTCPTransport returns a new tcp-based transport with the given configuration. On // success all the network listeners will be created and listening. -func NewTCPTransport(config TCPTransportConfig) (*TCPTransport, error) { +func NewTCPTransport(config TCPTransportConfig, logger log.Logger) (*TCPTransport, error) { if len(config.BindAddrs) == 0 { config.BindAddrs = []string{zeroZeroZeroZero} } @@ -110,6 +109,7 @@ func NewTCPTransport(config TCPTransportConfig) (*TCPTransport, error) { var ok bool t := TCPTransport{ cfg: config, + logger: logger, packetCh: make(chan *memberlist.Packet), connCh: make(chan net.Conn), } @@ -186,7 +186,7 @@ func (t *TCPTransport) tcpListen(tcpLn *net.TCPListener) { loopDelay = maxDelay } - level.Error(util.Logger).Log("msg", "TCPTransport: Error accepting TCP connection", "err", err) + level.Error(t.logger).Log("msg", "TCPTransport: Error accepting TCP connection", "err", err) time.Sleep(loopDelay) continue } @@ -201,7 +201,7 @@ var noopLogger = log.NewNopLogger() func (t *TCPTransport) debugLog() log.Logger { if t.cfg.TransportDebug { - return level.Debug(util.Logger) + return level.Debug(t.logger) } return noopLogger } @@ -220,7 +220,7 @@ func (t *TCPTransport) handleConnection(conn *net.TCPConn) { msgType := []byte{0} _, err := io.ReadFull(conn, msgType) if err != nil { - level.Error(util.Logger).Log("msg", "TCPTransport: failed to read message type", "err", err) + level.Error(t.logger).Log("msg", "TCPTransport: failed to read message type", "err", err) return } @@ -239,7 +239,7 @@ func (t *TCPTransport) handleConnection(conn *net.TCPConn) { _, err := io.ReadFull(conn, b) if err != nil { t.receivedPacketsErrors.Inc() - level.Error(util.Logger).Log("msg", "TCPTransport: error while reading address:", "err", err) + level.Error(t.logger).Log("msg", "TCPTransport: error while reading address:", "err", err) return } @@ -247,7 +247,7 @@ func (t *TCPTransport) handleConnection(conn *net.TCPConn) { _, err = io.ReadFull(conn, addrBuf) if err != nil { t.receivedPacketsErrors.Inc() - level.Error(util.Logger).Log("msg", "TCPTransport: error while reading address:", "err", err) + level.Error(t.logger).Log("msg", "TCPTransport: error while reading address:", "err", err) return } @@ -255,13 +255,13 @@ func (t *TCPTransport) handleConnection(conn *net.TCPConn) { buf, err := 
ioutil.ReadAll(conn) if err != nil { t.receivedPacketsErrors.Inc() - level.Error(util.Logger).Log("msg", "TCPTransport: error while reading packet data:", "err", err) + level.Error(t.logger).Log("msg", "TCPTransport: error while reading packet data:", "err", err) return } if len(buf) < md5.Size { t.receivedPacketsErrors.Inc() - level.Error(util.Logger).Log("msg", "TCPTransport: not enough data received", "length", len(buf)) + level.Error(t.logger).Log("msg", "TCPTransport: not enough data received", "length", len(buf)) return } @@ -272,7 +272,7 @@ func (t *TCPTransport) handleConnection(conn *net.TCPConn) { if !bytes.Equal(receivedDigest, expectedDigest[:]) { t.receivedPacketsErrors.Inc() - level.Warn(util.Logger).Log("msg", "TCPTransport: packet digest mismatch", "expected", fmt.Sprintf("%x", expectedDigest), "received", fmt.Sprintf("%x", receivedDigest)) + level.Warn(t.logger).Log("msg", "TCPTransport: packet digest mismatch", "expected", fmt.Sprintf("%x", expectedDigest), "received", fmt.Sprintf("%x", receivedDigest)) } t.debugLog().Log("msg", "TCPTransport: Received packet", "addr", addr(addrBuf), "size", len(buf), "hash", fmt.Sprintf("%x", receivedDigest)) @@ -286,7 +286,7 @@ func (t *TCPTransport) handleConnection(conn *net.TCPConn) { } } else { t.unknownConnections.Inc() - level.Error(util.Logger).Log("msg", "TCPTransport: unknown message type", "msgType", msgType) + level.Error(t.logger).Log("msg", "TCPTransport: unknown message type", "msgType", msgType) } } @@ -354,7 +354,7 @@ func (t *TCPTransport) FinalAdvertiseAddr(ip string, port int) (net.IP, int, err advertisePort = t.GetAutoBindPort() } - level.Debug(util.Logger).Log("msg", "FinalAdvertiseAddr", "advertiseAddr", advertiseAddr.String(), "advertisePort", advertisePort) + level.Debug(t.logger).Log("msg", "FinalAdvertiseAddr", "advertiseAddr", advertiseAddr.String(), "advertisePort", advertisePort) t.setAdvertisedAddr(advertiseAddr, advertisePort) return advertiseAddr, advertisePort, nil @@ -387,7 +387,7 @@ func (t *TCPTransport) WriteTo(b []byte, addr string) (time.Time, error) { return time.Time{}, fmt.Errorf("WriteTo %s: %w", addr, err) } - level.Warn(util.Logger).Log("msg", "TCPTransport: WriteTo failed", "addr", addr, "err", err) + level.Warn(t.logger).Log("msg", "TCPTransport: WriteTo failed", "addr", addr, "err", err) return time.Now(), nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index a73e3def699b..bc1f1b171c46 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -228,7 +228,7 @@ func (i *Lifecycler) CheckReady(ctx context.Context) error { if err := ringDesc.Ready(time.Now(), i.cfg.RingConfig.HeartbeatTimeout); err != nil { level.Warn(util.Logger).Log("msg", "found an existing instance(s) with a problem in the ring, "+ - "this instance cannot complete joining and become ready until this problem is resolved. "+ + "this instance cannot become ready until this problem is resolved. 
"+ "The /ring http endpoint on the distributor (or single binary) provides visibility into the ring.", "ring", i.RingName, "err", err) return err diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go index 7633d12f8a02..1439402e5b5b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go @@ -463,6 +463,28 @@ func (r *Ruler) CreateRuleGroup(w http.ResponseWriter, req *http.Request) { respondAccepted(w, logger) } +func (r *Ruler) DeleteNamespace(w http.ResponseWriter, req *http.Request) { + logger := util.WithContext(req.Context(), util.Logger) + + userID, namespace, _, err := parseRequest(req, true, false) + if err != nil { + respondError(logger, w, err.Error()) + return + } + + err = r.store.DeleteNamespace(req.Context(), userID, namespace) + if err != nil { + if err == rules.ErrGroupNamespaceNotFound { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + respondError(logger, w, err.Error()) + return + } + + respondAccepted(w, logger) +} + func (r *Ruler) DeleteRuleGroup(w http.ResponseWriter, req *http.Request) { logger := util.WithContext(req.Context(), util.Logger) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go index ff0c424fdd0e..494e81b5de0e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go @@ -72,12 +72,21 @@ func (t *PusherAppendable) Appender(ctx context.Context) storage.Appender { } } +// RulesLimits is the one function we need from limits.Overrides, and +// is here to limit coupling. +type RulesLimits interface { + EvaluationDelay(usedID string) time.Duration +} + // engineQueryFunc returns a new query function using the rules.EngineQueryFunc function // and passing an altered timestamp. -func engineQueryFunc(engine *promql.Engine, q storage.Queryable, delay time.Duration) rules.QueryFunc { - orig := rules.EngineQueryFunc(engine, q) +func engineQueryFunc(engine *promql.Engine, q storage.Queryable, overrides RulesLimits, userID string) rules.QueryFunc { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { - return orig(ctx, qs, t.Add(-delay)) + orig := rules.EngineQueryFunc(engine, q) + // Delay the evaluation of all rules by a set interval to give a buffer + // to metric that haven't been forwarded to cortex yet. 
+ evaluationDelay := overrides.EvaluationDelay(userID) + return orig(ctx, qs, t.Add(-evaluationDelay)) } } @@ -94,6 +103,7 @@ func DefaultTenantManagerFactory( p Pusher, q storage.Queryable, engine *promql.Engine, + overrides RulesLimits, ) ManagerFactory { return func( ctx context.Context, @@ -105,7 +115,7 @@ func DefaultTenantManagerFactory( return rules.NewManager(&rules.ManagerOptions{ Appendable: &PusherAppendable{pusher: p, userID: userID}, Queryable: q, - QueryFunc: engineQueryFunc(engine, q, cfg.EvaluationDelay), + QueryFunc: engineQueryFunc(engine, q, overrides, userID), Context: user.InjectOrgID(ctx, userID), ExternalURL: cfg.ExternalURL.URL, NotifyFunc: SendAlerts(notifier, cfg.ExternalURL.URL.String()), diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go index 2e68b722897d..7adce6bf2d18 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go @@ -49,8 +49,8 @@ type Config struct { ClientTLSConfig tls.ClientConfig `yaml:"ruler_client"` // How frequently to evaluate rules by default. EvaluationInterval time.Duration `yaml:"evaluation_interval"` - // Delay the evaluation of all rules by a set interval to give a buffer - // to metric that haven't been forwarded to cortex yet. + // Deprecated. Replaced with pkg/util/validation/Limits.RulerEvaluationDelay field. + // TODO: To be removed in Cortex 1.6. EvaluationDelay time.Duration `yaml:"evaluation_delay_duration"` // How frequently to poll for updated rules. PollInterval time.Duration `yaml:"poll_interval"` @@ -110,7 +110,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.ExternalURL.URL, _ = url.Parse("") // Must be non-nil f.Var(&cfg.ExternalURL, "ruler.external.url", "URL of alerts return path.") f.DurationVar(&cfg.EvaluationInterval, "ruler.evaluation-interval", 1*time.Minute, "How frequently to evaluate rules") - f.DurationVar(&cfg.EvaluationDelay, "ruler.evaluation-delay-duration", 0, "Duration to delay the evaluation of rules to ensure they underlying metrics have been pushed to cortex.") + f.DurationVar(&cfg.EvaluationDelay, "ruler.evaluation-delay-duration-deprecated", 0, "Deprecated. Please use -ruler.evaluation-delay-duration instead.") f.DurationVar(&cfg.PollInterval, "ruler.poll-interval", 1*time.Minute, "How frequently to poll for rule changes") f.StringVar(&cfg.AlertmanagerURL, "ruler.alertmanager-url", "", "Comma-separated list of URL(s) of the Alertmanager(s) to send notifications to. Each Alertmanager URL is treated as a separate group in the configuration. 
Multiple Alertmanagers in HA per group can be supported by using DNS resolution via -ruler.alertmanager-discovery.") diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go index dbd9a26ddc5b..748f868998aa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go @@ -48,21 +48,26 @@ func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup { } for i, rl := range rg.GetRules() { - recordNode := yaml.Node{} - recordNode.SetString(rl.GetRecord()) - alertNode := yaml.Node{} - alertNode.SetString(rl.GetAlert()) exprNode := yaml.Node{} exprNode.SetString(rl.GetExpr()) + newRule := rulefmt.RuleNode{ - Record: recordNode, - Alert: alertNode, Expr: exprNode, Labels: client.FromLabelAdaptersToLabels(rl.Labels).Map(), Annotations: client.FromLabelAdaptersToLabels(rl.Annotations).Map(), For: model.Duration(rl.GetFor()), } + if rl.GetRecord() != "" { + recordNode := yaml.Node{} + recordNode.SetString(rl.GetRecord()) + newRule.Record = recordNode + } else { + alertNode := yaml.Node{} + alertNode.SetString(rl.GetAlert()) + newRule.Alert = alertNode + } + formattedRuleGroup.Rules[i] = newRule } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/local/local.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/local/local.go index 7c18ce8169fc..3ded763c0b9b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/local/local.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/local/local.go @@ -87,6 +87,11 @@ func (l *Client) DeleteRuleGroup(ctx context.Context, userID, namespace string, return errors.New("DeleteRuleGroup unsupported in rule local store") } +// DeleteNamespace implements RulerStore +func (l *Client) DeleteNamespace(ctx context.Context, userID, namespace string) error { + return errors.New("DeleteNamespace unsupported in rule local store") +} + func (l *Client) listAllRulesGroupsForUser(ctx context.Context, userID string) (rules.RuleGroupList, error) { var allLists rules.RuleGroupList diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go index 381bf0feda63..0238e27a4d2f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go @@ -149,15 +149,45 @@ func (o *RuleStore) DeleteRuleGroup(ctx context.Context, userID string, namespac return err } +// DeleteNamespace deletes all the rule groups in the specified namespace +func (o *RuleStore) DeleteNamespace(ctx context.Context, userID, namespace string) error { + ruleGroupObjects, _, err := o.client.List(ctx, generateRuleObjectKey(userID, namespace, "")) + if err != nil { + return err + } + + if len(ruleGroupObjects) == 0 { + return rules.ErrGroupNamespaceNotFound + } + + for _, obj := range ruleGroupObjects { + level.Debug(util.Logger).Log("msg", "deleting rule group", "namespace", namespace, "key", obj.Key) + err = o.client.DeleteObject(ctx, obj.Key) + if err != nil { + level.Error(util.Logger).Log("msg", "unable to delete rule group from namespace", "err", err, "namespace", namespace, "key", obj.Key) + return err + } + } + + return nil +} + func generateRuleObjectKey(id, namespace, name string) string { if id == "" { return rulePrefix } + prefix := rulePrefix + id + "/" if namespace 
== "" { return prefix } - return prefix + base64.URLEncoding.EncodeToString([]byte(namespace)) + "/" + base64.URLEncoding.EncodeToString([]byte(name)) + + ns := base64.URLEncoding.EncodeToString([]byte(namespace)) + "/" + if name == "" { + return prefix + ns + } + + return prefix + ns + base64.URLEncoding.EncodeToString([]byte(name)) } func decomposeRuleObjectKey(handle string) string { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go index 884e282d10ea..143ce08aa9ae 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go @@ -26,6 +26,7 @@ type RuleStore interface { GetRuleGroup(ctx context.Context, userID, namespace, group string) (*RuleGroupDesc, error) SetRuleGroup(ctx context.Context, userID, namespace string, group *RuleGroupDesc) error DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error + DeleteNamespace(ctx context.Context, userID, namespace string) error } // RuleGroupList contains a set of rule groups @@ -129,3 +130,8 @@ func (c *ConfigRuleStore) SetRuleGroup(ctx context.Context, userID, namespace st func (c *ConfigRuleStore) DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error { return errors.New("not implemented by the config service rule store") } + +// DeleteNamespace is not implemented +func (c *ConfigRuleStore) DeleteNamespace(ctx context.Context, userID, namespace string) error { + return errors.New("not implemented by the config service rule store") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go index 10343f522889..547c70075abf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go @@ -17,7 +17,7 @@ type Config struct { // RegisterFlags registers the flags for TSDB Azure storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f) + cfg.RegisterFlagsWithPrefix("blocks-storage.", f) } // RegisterFlagsWithPrefix registers the flags for TSDB Azure storage diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go index 28dc109e2030..26a3ebd1c45c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go @@ -9,7 +9,7 @@ type Config struct { // RegisterFlags registers the flags for TSDB filesystem storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f) + cfg.RegisterFlagsWithPrefix("blocks-storage.", f) } // RegisterFlagsWithPrefix registers the flags for TSDB filesystem storage with the provided prefix diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go index 899e706dd91c..44eb020b1279 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go @@ -14,7 +14,7 @@ type Config struct { // RegisterFlags registers the flags for TSDB GCS 
storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f) + cfg.RegisterFlagsWithPrefix("blocks-storage.", f) } // RegisterFlagsWithPrefix registers the flags for TSDB GCS storage with the provided prefix diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go index f5f396cb0a8f..6677723cfef9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go @@ -17,7 +17,7 @@ type Config struct { // RegisterFlags registers the flags for TSDB s3 storage with the provided prefix func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f) + cfg.RegisterFlagsWithPrefix("blocks-storage.", f) } // RegisterFlagsWithPrefix registers the flags for TSDB s3 storage with the provided prefix diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go index 56b1f16676ee..075d13d4d3bf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -118,7 +118,7 @@ func (cfg *BucketConfig) RegisterFlags(f *flag.FlagSet) { cfg.Azure.RegisterFlags(f) cfg.Filesystem.RegisterFlags(f) - f.StringVar(&cfg.Backend, "experimental.blocks-storage.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", "))) + f.StringVar(&cfg.Backend, "blocks-storage.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", "))) } // RegisterFlags registers the TSDB flags @@ -178,18 +178,18 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { cfg.BlockRanges = []time.Duration{2 * time.Hour} // Default 2h block } - f.StringVar(&cfg.Dir, "experimental.blocks-storage.tsdb.dir", "tsdb", "Local directory to store TSDBs in the ingesters.") - f.Var(&cfg.BlockRanges, "experimental.blocks-storage.tsdb.block-ranges-period", "TSDB blocks range period.") - f.DurationVar(&cfg.Retention, "experimental.blocks-storage.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.") - f.DurationVar(&cfg.ShipInterval, "experimental.blocks-storage.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.") - f.IntVar(&cfg.ShipConcurrency, "experimental.blocks-storage.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.") - f.IntVar(&cfg.MaxTSDBOpeningConcurrencyOnStartup, "experimental.blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup", 10, "limit the number of concurrently opening TSDB's on startup") - f.DurationVar(&cfg.HeadCompactionInterval, "experimental.blocks-storage.tsdb.head-compaction-interval", 1*time.Minute, "How frequently does Cortex try to compact TSDB head. Block is only created if data covers smallest block range. 
Must be greater than 0 and max 5 minutes.") - f.IntVar(&cfg.HeadCompactionConcurrency, "experimental.blocks-storage.tsdb.head-compaction-concurrency", 5, "Maximum number of tenants concurrently compacting TSDB head into a new block") - f.DurationVar(&cfg.HeadCompactionIdleTimeout, "experimental.blocks-storage.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. 0 means disabled.") - f.IntVar(&cfg.StripeSize, "experimental.blocks-storage.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.") - f.BoolVar(&cfg.WALCompressionEnabled, "experimental.blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") - f.BoolVar(&cfg.FlushBlocksOnShutdown, "experimental.blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.") + f.StringVar(&cfg.Dir, "blocks-storage.tsdb.dir", "tsdb", "Local directory to store TSDBs in the ingesters.") + f.Var(&cfg.BlockRanges, "blocks-storage.tsdb.block-ranges-period", "TSDB blocks range period.") + f.DurationVar(&cfg.Retention, "blocks-storage.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.") + f.DurationVar(&cfg.ShipInterval, "blocks-storage.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.") + f.IntVar(&cfg.ShipConcurrency, "blocks-storage.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.") + f.IntVar(&cfg.MaxTSDBOpeningConcurrencyOnStartup, "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup", 10, "limit the number of concurrently opening TSDB's on startup") + f.DurationVar(&cfg.HeadCompactionInterval, "blocks-storage.tsdb.head-compaction-interval", 1*time.Minute, "How frequently does Cortex try to compact TSDB head. Block is only created if data covers smallest block range. Must be greater than 0 and max 5 minutes.") + f.IntVar(&cfg.HeadCompactionConcurrency, "blocks-storage.tsdb.head-compaction-concurrency", 5, "Maximum number of tenants concurrently compacting TSDB head into a new block") + f.DurationVar(&cfg.HeadCompactionIdleTimeout, "blocks-storage.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. 0 means disabled.") + f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.") + f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") + f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.") } // Validate the config. 
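Every blocks-storage flag loses its "experimental." prefix in this revendor, so operators must rename them when upgrading. A sketch of parsing one renamed flag through the TSDB config type above; the flag-set name and sample directory are illustrative:

package main

import (
	"flag"
	"fmt"

	"github.com/cortexproject/cortex/pkg/storage/tsdb"
)

func main() {
	var cfg tsdb.TSDBConfig
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	cfg.RegisterFlags(fs)

	// What was -experimental.blocks-storage.tsdb.dir is now
	// -blocks-storage.tsdb.dir.
	if err := fs.Parse([]string{"-blocks-storage.tsdb.dir", "/data/tsdb"}); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Dir) // /data/tsdb
}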
@@ -248,22 +248,22 @@ type BucketStoreConfig struct { // RegisterFlags registers the BucketStore flags func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { - cfg.IndexCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.index-cache.") - cfg.ChunksCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.chunks-cache.") - cfg.MetadataCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.metadata-cache.") - - f.StringVar(&cfg.SyncDir, "experimental.blocks-storage.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.") - f.DurationVar(&cfg.SyncInterval, "experimental.blocks-storage.bucket-store.sync-interval", 5*time.Minute, "How frequently scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.") - f.Uint64Var(&cfg.MaxChunkPoolBytes, "experimental.blocks-storage.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.") - f.IntVar(&cfg.MaxConcurrent, "experimental.blocks-storage.bucket-store.max-concurrent", 100, "Max number of concurrent queries to execute against the long-term storage. The limit is shared across all tenants.") - f.IntVar(&cfg.TenantSyncConcurrency, "experimental.blocks-storage.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants synching blocks.") - f.IntVar(&cfg.BlockSyncConcurrency, "experimental.blocks-storage.bucket-store.block-sync-concurrency", 20, "Maximum number of concurrent blocks synching per tenant.") - f.IntVar(&cfg.MetaSyncConcurrency, "experimental.blocks-storage.bucket-store.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from object storage per tenant.") - f.DurationVar(&cfg.ConsistencyDelay, "experimental.blocks-storage.bucket-store.consistency-delay", 0, "Minimum age of a block before it's being read. Set it to safe value (e.g 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.") - f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "experimental.blocks-storage.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+ + cfg.IndexCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.index-cache.") + cfg.ChunksCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.chunks-cache.") + cfg.MetadataCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.metadata-cache.") + + f.StringVar(&cfg.SyncDir, "blocks-storage.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.") + f.DurationVar(&cfg.SyncInterval, "blocks-storage.bucket-store.sync-interval", 5*time.Minute, "How frequently to scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.") + f.Uint64Var(&cfg.MaxChunkPoolBytes, "blocks-storage.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.") + f.IntVar(&cfg.MaxConcurrent, "blocks-storage.bucket-store.max-concurrent", 100, "Max number of concurrent queries to execute against the long-term storage.
The limit is shared across all tenants.") + f.IntVar(&cfg.TenantSyncConcurrency, "blocks-storage.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants synching blocks.") + f.IntVar(&cfg.BlockSyncConcurrency, "blocks-storage.bucket-store.block-sync-concurrency", 20, "Maximum number of concurrent blocks synching per tenant.") + f.IntVar(&cfg.MetaSyncConcurrency, "blocks-storage.bucket-store.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from object storage per tenant.") + f.DurationVar(&cfg.ConsistencyDelay, "blocks-storage.bucket-store.consistency-delay", 0, "Minimum age of a block before it's being read. Set it to safe value (e.g 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.") + f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "blocks-storage.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+ "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+ "Default is 6h, half of the default value for -compactor.deletion-delay.") - f.IntVar(&cfg.PostingOffsetsInMemSampling, "experimental.blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") + f.IntVar(&cfg.PostingOffsetsInMemSampling, "blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") } // Validate the config. diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go index dde7067be1f1..cbe8c3520d43 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go @@ -44,7 +44,7 @@ type IndexCacheConfig struct { } func (cfg *IndexCacheConfig) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.index-cache.") + cfg.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.index-cache.") } func (cfg *IndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go index 507f23654a98..3b13764aec80 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go @@ -63,8 +63,8 @@ type Config struct { func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.ShardingRing.RegisterFlags(f) - f.BoolVar(&cfg.ShardingEnabled, "experimental.store-gateway.sharding-enabled", false, "Shard blocks across multiple store gateway instances."+sharedOptionWithQuerier) - f.StringVar(&cfg.ShardingStrategy, "experimental.store-gateway.sharding-strategy", ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. 
Supported values are: %s.", strings.Join(supportedShardingStrategies, ", "))) + f.BoolVar(&cfg.ShardingEnabled, "store-gateway.sharding-enabled", false, "Shard blocks across multiple store gateway instances."+sharedOptionWithQuerier) + f.StringVar(&cfg.ShardingStrategy, "store-gateway.sharding-strategy", ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", "))) } // Validate the Config. diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go index 830c5af6bc46..fdb15b69d1bc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go @@ -62,18 +62,18 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { } // Ring flags - cfg.KVStore.RegisterFlagsWithPrefix("experimental.store-gateway.sharding-ring.", "collectors/", f) - f.DurationVar(&cfg.HeartbeatPeriod, "experimental.store-gateway.sharding-ring.heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") - f.DurationVar(&cfg.HeartbeatTimeout, "experimental.store-gateway.sharding-ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which store gateways are considered unhealthy within the ring."+sharedOptionWithQuerier) - f.IntVar(&cfg.ReplicationFactor, "experimental.store-gateway.replication-factor", 3, "The replication factor to use when sharding blocks."+sharedOptionWithQuerier) - f.StringVar(&cfg.TokensFilePath, "experimental.store-gateway.tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") + cfg.KVStore.RegisterFlagsWithPrefix("store-gateway.sharding-ring.", "collectors/", f) + f.DurationVar(&cfg.HeartbeatPeriod, "store-gateway.sharding-ring.heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, "store-gateway.sharding-ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which store gateways are considered unhealthy within the ring."+sharedOptionWithQuerier) + f.IntVar(&cfg.ReplicationFactor, "store-gateway.replication-factor", 3, "The replication factor to use when sharding blocks."+sharedOptionWithQuerier) + f.StringVar(&cfg.TokensFilePath, "store-gateway.tokens-file-path", "", "File path where tokens are stored. 
If empty, tokens are not stored at shutdown and restored at startup.") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} - f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), "experimental.store-gateway.sharding-ring.instance-interface", "Name of network interface to read address from.") - f.StringVar(&cfg.InstanceAddr, "experimental.store-gateway.sharding-ring.instance-addr", "", "IP address to advertise in the ring.") - f.IntVar(&cfg.InstancePort, "experimental.store-gateway.sharding-ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") - f.StringVar(&cfg.InstanceID, "experimental.store-gateway.sharding-ring.instance-id", hostname, "Instance ID to register in the ring.") + f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), "store-gateway.sharding-ring.instance-interface", "Name of network interface to read address from.") + f.StringVar(&cfg.InstanceAddr, "store-gateway.sharding-ring.instance-addr", "", "IP address to advertise in the ring.") + f.IntVar(&cfg.InstancePort, "store-gateway.sharding-ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") + f.StringVar(&cfg.InstanceID, "store-gateway.sharding-ring.instance-id", hostname, "Instance ID to register in the ring.") // Defaults for internal settings. cfg.RingCheckPeriod = 5 * time.Second diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go b/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go index 8875f9dfbdc6..1af768016bf2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go @@ -192,3 +192,18 @@ func (m *Manager) findInverseDependencies(mod string, mods []string) []string { return result } + +// DependenciesForModule returns transitive dependencies for given module, sorted by name. +func (m *Manager) DependenciesForModule(module string) []string { + dedup := map[string]bool{} + for _, d := range m.listDeps(module) { + dedup[d] = true + } + + result := make([]string, 0, len(dedup)) + for d := range dedup { + result = append(result, d) + } + sort.Strings(result) + return result +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/services/manager.go b/vendor/github.com/cortexproject/cortex/pkg/util/services/manager.go index dc8efead0b91..6a556b1a78e9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/services/manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/services/manager.go @@ -116,7 +116,18 @@ func (m *Manager) AwaitHealthy(ctx context.Context) error { defer m.mu.Unlock() if m.state != healthy { - return errors.New("not healthy") + terminated := len(m.byState[Terminated]) + + var failedReasons []string + for _, s := range m.byState[Failed] { + err := s.FailureCase() + if err != nil { + // err is never nil for a failed service. 
+ failedReasons = append(failedReasons, err.Error()) + } + } + + return fmt.Errorf("not healthy, %d terminated, %d failed: %v", terminated, len(failedReasons), failedReasons) } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index e88bb47b2b88..5d225f3f91fb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -18,6 +18,13 @@ const ( GlobalIngestionRateStrategy = "global" ) +// LimitError is an error raised for values that do not comply with the specified limits. +type LimitError string + +func (e LimitError) Error() string { + return string(e) +} + // Limits describe all the limits for users; can be used to describe global default // limits via flags, or per-user limits via yaml config. type Limits struct { @@ -62,6 +69,9 @@ type Limits struct { CardinalityLimit int `yaml:"cardinality_limit"` MaxCacheFreshness time.Duration `yaml:"max_cache_freshness"` + // Ruler defaults and limits. + RulerEvaluationDelay time.Duration `yaml:"ruler_evaluation_delay_duration"` + // Store-gateway. StoreGatewayTenantShardSize int `yaml:"store_gateway_tenant_shard_size"` @@ -109,11 +119,13 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. This limit is ignored when running the Cortex blocks storage. 0 to disable.") f.DurationVar(&l.MaxCacheFreshness, "frontend.max-cache-freshness", 1*time.Minute, "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.") + f.DurationVar(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", 0, "Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.") + f.StringVar(&l.PerTenantOverrideConfig, "limits.per-user-override-config", "", "File name of per-user overrides. [deprecated, use -runtime-config.file instead]") f.DurationVar(&l.PerTenantOverridePeriod, "limits.per-user-override-period", 10*time.Second, "Period with which to reload the overrides. [deprecated, use -runtime-config.reload-period instead]") // Store-gateway. - f.IntVar(&l.StoreGatewayTenantShardSize, "experimental.store-gateway.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set when the store-gateway sharding is enabled with the shuffle-sharding strategy. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") + f.IntVar(&l.StoreGatewayTenantShardSize, "store-gateway.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set when the store-gateway sharding is enabled with the shuffle-sharding strategy. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") } // Validate the limits config and returns an error if the validation @@ -346,6 +358,11 @@ func (o *Overrides) SubringSize(userID string) int { return o.getOverridesForUser(userID).SubringSize } +// EvaluationDelay returns the rules evaluation delay for a given user. +func (o *Overrides) EvaluationDelay(userID string) time.Duration { + return o.getOverridesForUser(userID).RulerEvaluationDelay +} + // StoreGatewayTenantShardSize returns the store-gateway shard size for a given user. 
func (o *Overrides) StoreGatewayTenantShardSize(userID string) int { return o.getOverridesForUser(userID).StoreGatewayTenantShardSize diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE new file mode 100644 index 000000000000..22080f736a41 --- /dev/null +++ b/vendor/github.com/dgryski/go-rendezvous/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017-2020 Damian Gryski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go new file mode 100644 index 000000000000..7a6f8203c678 --- /dev/null +++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go @@ -0,0 +1,79 @@ +package rendezvous + +type Rendezvous struct { + nodes map[string]int + nstr []string + nhash []uint64 + hash Hasher +} + +type Hasher func(s string) uint64 + +func New(nodes []string, hash Hasher) *Rendezvous { + r := &Rendezvous{ + nodes: make(map[string]int, len(nodes)), + nstr: make([]string, len(nodes)), + nhash: make([]uint64, len(nodes)), + hash: hash, + } + + for i, n := range nodes { + r.nodes[n] = i + r.nstr[i] = n + r.nhash[i] = hash(n) + } + + return r +} + +func (r *Rendezvous) Lookup(k string) string { + // short-circuit if we're empty + if len(r.nodes) == 0 { + return "" + } + + khash := r.hash(k) + + var midx int + var mhash = xorshiftMult64(khash ^ r.nhash[0]) + + for i, nhash := range r.nhash[1:] { + if h := xorshiftMult64(khash ^ nhash); h > mhash { + midx = i + 1 + mhash = h + } + } + + return r.nstr[midx] +} + +func (r *Rendezvous) Add(node string) { + r.nodes[node] = len(r.nstr) + r.nstr = append(r.nstr, node) + r.nhash = append(r.nhash, r.hash(node)) +} + +func (r *Rendezvous) Remove(node string) { + // find index of node to remove + nidx := r.nodes[node] + + // remove from the slices by swapping in the last element + l := len(r.nstr) - 1 + r.nstr[nidx] = r.nstr[l] + r.nstr = r.nstr[:l] + + r.nhash[nidx] = r.nhash[l] + r.nhash = r.nhash[:l] + + // update the map + delete(r.nodes, node) + if nidx < l { + // only remap when the removed entry was not the last element + moved := r.nstr[nidx] + r.nodes[moved] = nidx + } +} + +func xorshiftMult64(x uint64) uint64 { + x ^= x >> 12 // a + x ^= x << 25 // b + x ^= x >> 27 // c + return x * 2685821657736338717 +} diff --git a/vendor/github.com/felixge/fgprof/LICENSE.txt b/vendor/github.com/felixge/fgprof/LICENSE.txt new file mode 100644 index 000000000000..3e424911bdb3 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/LICENSE.txt @@ -0,0 +1,8 @@ +The MIT License (MIT) +Copyright © 2020 Felix Geisendörfer + +Permission is hereby granted, 
free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/felixge/fgprof/README.md b/vendor/github.com/felixge/fgprof/README.md new file mode 100644 index 000000000000..fe0c0a25d300 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/README.md @@ -0,0 +1,214 @@ +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go)](https://pkg.go.dev/github.com/felixge/fgprof) +![GitHub Workflow Status](https://img.shields.io/github/workflow/status/felixge/fgprof/Go) +![GitHub](https://img.shields.io/github/license/felixge/fgprof) + +# :rocket: fgprof - The Full Go Profiler + +fgprof is a sampling [Go](https://golang.org/) profiler that allows you to analyze On-CPU as well as [Off-CPU](http://www.brendangregg.com/offcpuanalysis.html) (e.g. I/O) time together. + +Go's builtin sampling CPU profiler can only show On-CPU time, but it's better than fgprof at that. Go also includes tracing profilers that can analyze I/O, but they can't be combined with the CPU profiler. + +fgprof is designed for analyzing applications with mixed I/O and CPU workloads. + +## Quick Start + +If this is the first time you've heard about fgprof, you should start by reading about [The Problem](#the-problem) & [How it Works](#how-it-works). + +There is no need to choose between fgprof and the builtin profiler. Here is how to add both to your application: + +```go +package main + +import ( + "log" + "net/http" + _ "net/http/pprof" + + "github.com/felixge/fgprof" +) + +func main() { + http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler()) + go func() { + log.Println(http.ListenAndServe(":6060", nil)) + }() + + // +} +``` + +fgprof is compatible with the `go tool pprof` visualizer, so taking and analyzing a 3s profile is as simple as: + +``` +go tool pprof --http=:6061 http://localhost:6060/debug/fgprof?seconds=3 +``` + +![](./assets/fgprof_pprof.png) + +Additionally, fgprof supports the plain text format used by Brendan Gregg's [FlameGraph](http://www.brendangregg.com/flamegraphs.html) utility: + +``` +git clone https://github.com/brendangregg/FlameGraph +cd FlameGraph +curl -s 'localhost:6060/debug/fgprof?seconds=3&format=folded' > fgprof.folded +./flamegraph.pl fgprof.folded > fgprof.svg +``` + +![](./assets/fgprof_gregg.png) + +Which tool you prefer is up to you, but one thing I like about Gregg's tool is that you can filter the plaintext files using grep, which can be very useful when analyzing large programs. + +If you don't have a program to profile right now, you can `go run ./example` which should allow you to reproduce the graphs you see above. 
If you've never seen such graphs before, and are unsure how to read them, head over to Brendan Gregg's [Flame Graph](http://www.brendangregg.com/flamegraphs.html) page. + +## The Problem + +Let's say you've been tasked with optimizing a simple program that has a loop calling out to three functions: + +```go +func main() { + for { + // HTTP request to a web service that might be slow. + slowNetworkRequest() + // Some heavy CPU computation. + cpuIntensiveTask() + // Poorly named function that you don't understand yet. + weirdFunction() + } +} +``` + +One way to decide which of these three functions you should focus your attention on would be to wrap each function call like this: + +```go +start := time.Now() +slowNetworkRequest() +fmt.Printf("slowNetworkRequest: %s\n", time.Since(start)) +// ... +``` + +However, this can be very tedious for large programs. You'll also have to figure out how to average the numbers in case they fluctuate. And once you've done that, you'll have to repeat the process for the functions called by the function you decide to focus on. + +### /debug/pprof/profile + +So, this seems like a perfect use case for a profiler. Let's try the `/debug/pprof/profile` endpoint of the builtin `net/http/pprof` package to analyze our program for 10s: + +```go +import ( + "log" + "net/http" + _ "net/http/pprof" +) + +func main() { + go func() { + log.Println(http.ListenAndServe(":6060", nil)) + }() + + // +} +``` + +``` +go tool pprof -http=:6061 http://localhost:6060/debug/pprof/profile?seconds=10 +``` + +That was easy! Looks like we're spending all our time in `cpuIntensiveTask()`, so let's focus on that? + +![](./assets/pprof_cpu.png) + +But before we get carried away, let's quickly double-check this assumption by manually timing our function calls with `time.Since()` as described above: + +``` +slowNetworkRequest: 66.815041ms +cpuIntensiveTask: 30.000672ms +weirdFunction: 10.64764ms +slowNetworkRequest: 67.194516ms +cpuIntensiveTask: 30.000912ms +weirdFunction: 10.105371ms +// ... +``` + +Oh no, the builtin CPU profiler is misleading us! How is that possible? Well, it turns out the builtin profiler only shows On-CPU time. Time spent waiting on I/O is completely hidden from us. + +### /debug/pprof/trace + +Let's try something else. The `/debug/pprof/trace` endpoint includes a "synchronization blocking profile", maybe that's what we need? + +``` +curl -so pprof.trace http://localhost:6060/debug/pprof/trace?seconds=10 +go tool trace --pprof=sync pprof.trace > sync.pprof +go tool pprof --http=:6061 sync.pprof +``` + +Oh no, we're being misled again. This profiler thinks all our time is spent on `slowNetworkRequest()`. It's completely missing `cpuIntensiveTask()`. And what about `weirdFunction()`? It seems like no builtin profiler can see it? + +![](./assets/pprof_trace.png) + +### /debug/fgprof + +So what can we do? Let's try fgprof, which is designed to analyze mixed I/O and CPU workloads like the one we're dealing with here. We can easily add it alongside the builtin profilers. + +```go +import ( + "log" + "net/http" + _ "net/http/pprof" + + "github.com/felixge/fgprof" +) + +func main() { + http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler()) + go func() { + log.Println(http.ListenAndServe(":6060", nil)) + }() + + // +} +``` + + + +``` +go tool pprof --http=:6061 http://localhost:6060/debug/fgprof?seconds=10 +``` + +Finally, a profile that shows all three of our functions and how much time we're spending on them. It also turns out our `weirdFunction()` was simply calling `time.Sleep()`, how weird indeed! 
+ +![](./assets/fgprof_pprof.png) + +## How it Works + +### fgprof + +fgprof is implemented as a background goroutine that wakes up 99 times per second and calls `runtime.GoroutineProfile`. This returns a list of all goroutines and their call stacks, regardless of their current On/Off-CPU scheduling status. + +This data is used to maintain an in-memory stack counter, which can be converted to the pprof or folded output format. The meat of the implementation is super simple and < 100 lines of code; you should [check it out](./fgprof.go). + +Generally speaking, fgprof should not have a big impact on the performance of your program. However, `runtime.GoroutineProfile` calls `stopTheWorld()` and could be slow if you have a lot of goroutines. For now, the advice is to test the impact of the profiler in a development environment before running it against production instances. In the future this README will try to provide a more detailed analysis of the performance impact. + +### Go's builtin CPU Profiler + +The builtin Go CPU profiler uses the [setitimer(2)](https://linux.die.net/man/2/setitimer) system call to ask the operating system to send it a `SIGPROF` signal 100 times a second. Each signal stops the Go process and gets delivered to a random thread's `sigtrampgo()` function. This function then proceeds to call `sigprof()` or `sigprofNonGo()` to record the thread's current stack. + +Since Go uses non-blocking I/O, Goroutines that wait on I/O are parked and not running on any threads. Therefore they end up being largely invisible to Go's builtin CPU profiler. + +## The Future of Go Profiling + +There is a great proposal for [hardware performance counters for CPU profiling](https://go.googlesource.com/proposal/+/refs/changes/08/219508/2/design/36821-perf-counter-pprof.md#5-empirical-evidence-on-the-accuracy-and-precision-of-pmu-profiles) in Go. The proposal is aimed at making the builtin CPU Profiler even more accurate, especially under highly parallel workloads on many CPUs. It also includes a very in-depth analysis of the current profiler. Based on the design, I think the proposed profiler would also be blind to I/O workloads, but still seems appealing for CPU-based workloads. + +As far as fgprof itself is concerned, I might implement streaming output, leaving the final aggregation to other tools. This would open the door to even more advanced analysis, perhaps by integrating with tools such as [flamescope](https://github.com/Netflix/flamescope). + +Additionally, I'm open to the idea of contributing fgprof to the Go project itself. I've [floated the idea](https://groups.google.com/g/golang-dev/c/LCJyvL90xv8) on the golang-dev mailing list, so let's see what happens. + + +## Known Issues + +There is no perfect approach to profiling, and fgprof is no exception. Below is a list of known issues that will hopefully not be of practical concern for most users, but are important to highlight. + +- fgprof can't catch goroutines while they are running in loops without function calls, only when they get asynchronously preempted. This can lead to reporting inaccuracies. Use the builtin CPU profiler if this is a problem for you. +- fgprof may not work in Go 1.13 if another goroutine is in a loop without function calls the whole time. Async preemption in Go 1.14 should mostly fix this issue. +- Internal C functions are not showing up in the stack traces, e.g. `runtime.nanotime`, which is called by `time.Since` in the example program. 
+- The current implementation is relying on the Go scheduler to schedule the internal goroutine at a fixed sample rate. Scheduler delays, especially biased ones, might cause inaccuracies. + +## Credits + +The following articles helped me to learn more about how profilers in general, and the Go profiler in particular work. + +- [How do Ruby & Python profilers work?](https://jvns.ca/blog/2017/12/17/how-do-ruby---python-profilers-work-/) by Julia Evans +- [Profiling Go programs with pprof](https://jvns.ca/blog/2017/09/24/profiling-go-with-pprof/) by Julia Evans + +## License + +fgprof is licensed under the MIT License. diff --git a/vendor/github.com/felixge/fgprof/fgprof.go b/vendor/github.com/felixge/fgprof/fgprof.go new file mode 100644 index 000000000000..dba16161ec32 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/fgprof.go @@ -0,0 +1,97 @@ +// fgprof is a sampling Go profiler that allows you to analyze On-CPU as well +// as [Off-CPU](http://www.brendangregg.com/offcpuanalysis.html) (e.g. I/O) +// time together. +package fgprof + +import ( + "io" + "runtime" + "strings" + "time" +) + +// Start begins profiling the goroutines of the program and returns a function +// that needs to be invoked by the caller to stop the profiling and write the +// results to w using the given format. +func Start(w io.Writer, format Format) func() error { + // Go's CPU profiler uses 100hz, but 99hz might be less likely to result in + // accidental synchronization with the program we're profiling. + const hz = 99 + ticker := time.NewTicker(time.Second / hz) + stopCh := make(chan struct{}) + + stackCounts := stackCounter{} + go func() { + defer ticker.Stop() + + for { + select { + case <-ticker.C: + stackCounts.Update() + case <-stopCh: + return + } + } + }() + + return func() error { + stopCh <- struct{}{} + return writeFormat(w, stackCounts, format, hz) + } +} + +type stackCounter map[string]int + +func (s stackCounter) Update() { + // Determine the runtime.Frame of this func so we can hide it from our + // profiling output. + rpc := make([]uintptr, 1) + n := runtime.Callers(1, rpc) + if n < 1 { + panic("could not determine selfFrame") + } + selfFrame, _ := runtime.CallersFrames(rpc).Next() + + // COPYRIGHT: The code for populating `p` below is copied from + // writeRuntimeProfile in src/runtime/pprof/pprof.go. + // + // Find out how many records there are (GoroutineProfile(nil)), + // allocate that many records, and get the data. + // There's a race—more records might be added between + // the two calls—so allocate a few extra records for safety + // and also try again if we're very unlucky. + // The loop should only execute one iteration in the common case. + var p []runtime.StackRecord + n, ok := runtime.GoroutineProfile(nil) + for { + // Allocate room for a slightly bigger profile, + // in case a few more entries have been added + // since the call to ThreadProfile. + p = make([]runtime.StackRecord, n+10) + n, ok = runtime.GoroutineProfile(p) + if ok { + p = p[0:n] + break + } + // Profile grew; try again. + } + +outer: + for _, pp := range p { + frames := runtime.CallersFrames(pp.Stack()) + + var stack []string + for { + frame, more := frames.Next() + if !more { + break + } else if frame.Entry == selfFrame.Entry { + continue outer + } + + stack = append([]string{frame.Function}, stack...) 
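+ // frames.Next walks the stack from leaf to root, so prepending each function name here leaves the joined key in root;...;leaf order, which is the order the folded format expects.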
+ } + key := strings.Join(stack, ";") + s[key]++ + } +} diff --git a/vendor/github.com/felixge/fgprof/format.go b/vendor/github.com/felixge/fgprof/format.go new file mode 100644 index 000000000000..1a351e39c2b9 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/format.go @@ -0,0 +1,102 @@ +package fgprof + +import ( + "fmt" + "io" + "sort" + "strings" + + "github.com/google/pprof/profile" +) + +type Format string + +const ( + // FormatFolded is used by Brendan Gregg's FlameGraph utility, see + // https://github.com/brendangregg/FlameGraph#2-fold-stacks. + FormatFolded Format = "folded" + // FormatPprof is used by Google's pprof utility, see + // https://github.com/google/pprof/blob/master/proto/README.md. + FormatPprof Format = "pprof" +) + +func writeFormat(w io.Writer, s stackCounter, f Format, hz int) error { + switch f { + case FormatFolded: + return writeFolded(w, s) + case FormatPprof: + return toPprof(s, hz).Write(w) + default: + return fmt.Errorf("unknown format: %q", f) + } +} + +func writeFolded(w io.Writer, s stackCounter) error { + for _, stack := range sortedKeys(s) { + count := s[stack] + if _, err := fmt.Fprintf(w, "%s %d\n", stack, count); err != nil { + return err + } + } + return nil +} + +func toPprof(s stackCounter, hz int) *profile.Profile { + functionID := uint64(1) + locationID := uint64(1) + line := int64(1) + + p := &profile.Profile{} + m := &profile.Mapping{ID: 1, HasFunctions: true} + p.Mapping = []*profile.Mapping{m} + p.SampleType = []*profile.ValueType{ + { + Type: "samples", + Unit: "count", + }, + { + Type: "time", + Unit: "nanoseconds", + }, + } + + for stack, count := range s { + sample := &profile.Sample{ + Value: []int64{ + int64(count), + int64(1000 * 1000 * 1000 / hz * count), + }, + } + for _, fnName := range strings.Split(stack, ";") { + function := &profile.Function{ + ID: functionID, + Name: fnName, + } + p.Function = append(p.Function, function) + + location := &profile.Location{ + ID: locationID, + Mapping: m, + Line: []profile.Line{{Function: function}}, + } + p.Location = append(p.Location, location) + sample.Location = append([]*profile.Location{location}, sample.Location...) 
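+ // The folded key is in root;...;leaf order, so prepending each location leaves the leaf frame at Location[0], matching pprof's convention.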
+ + line++ + + locationID++ + functionID++ + } + p.Sample = append(p.Sample, sample) + } + return p +} + +func sortedKeys(s stackCounter) []string { + var keys []string + for stack := range s { + keys = append(keys, stack) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/felixge/fgprof/go.mod b/vendor/github.com/felixge/fgprof/go.mod new file mode 100644 index 000000000000..b2ce6fd982c5 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/go.mod @@ -0,0 +1,5 @@ +module github.com/felixge/fgprof + +go 1.14 + +require github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040 diff --git a/vendor/github.com/felixge/fgprof/go.sum b/vendor/github.com/felixge/fgprof/go.sum new file mode 100644 index 000000000000..07ac5705c592 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/go.sum @@ -0,0 +1,7 @@ +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040 h1:i7RUpu0EybzQyQvPT7J3MmODs4+gPcHsD/pqW0uIYVo= +github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/felixge/fgprof/handler.go b/vendor/github.com/felixge/fgprof/handler.go new file mode 100644 index 000000000000..a25cdc695e8d --- /dev/null +++ b/vendor/github.com/felixge/fgprof/handler.go @@ -0,0 +1,32 @@ +package fgprof + +import ( + "fmt" + "net/http" + "time" +) + +// Handler returns an http handler that takes an optional "seconds" query +// argument that defaults to "30" and produces a profile over this duration. +// The optional "format" parameter controls if the output is written in +// Google's "pprof" format (default) or Brendan Gregg's "folded" stack format. 
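+// For example (assuming the handler is mounted at /debug/fgprof as in the README above, a hypothetical invocation): curl 'localhost:6060/debug/fgprof?seconds=3&format=folded' collects a folded profile over 3 seconds.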
+func Handler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var seconds int + if s := r.URL.Query().Get("seconds"); s == "" { + seconds = 30 + } else if _, err := fmt.Sscanf(s, "%d", &seconds); err != nil || seconds <= 0 { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "bad seconds: %d: %s\n", seconds, err) + // Return early so a bad request doesn't fall through and run a profile anyway. + return + } + + format := Format(r.URL.Query().Get("format")) + if format == "" { + format = FormatPprof + } + + stop := Start(w, format) + defer stop() + time.Sleep(time.Duration(seconds) * time.Second) + }) +} diff --git a/vendor/github.com/felixge/fgprof/pprof.go b/vendor/github.com/felixge/fgprof/pprof.go new file mode 100644 index 000000000000..f0908e8e01b4 --- /dev/null +++ b/vendor/github.com/felixge/fgprof/pprof.go @@ -0,0 +1,56 @@ +package fgprof + +import ( + "strings" + + "github.com/google/pprof/profile" +) + +func toProfile(s stackCounter, hz int) *profile.Profile { + functionID := uint64(1) + locationID := uint64(1) + + p := &profile.Profile{} + m := &profile.Mapping{ID: 1, HasFunctions: true} + p.Mapping = []*profile.Mapping{m} + p.SampleType = []*profile.ValueType{ + { + Type: "samples", + Unit: "count", + }, + { + Type: "time", + Unit: "nanoseconds", + }, + } + + for _, stack := range sortedKeys(s) { + count := s[stack] + sample := &profile.Sample{ + Value: []int64{ + int64(count), + int64(1000 * 1000 * 1000 / hz * count), + }, + } + for _, fnName := range strings.Split(stack, ";") { + function := &profile.Function{ + ID: functionID, + Name: fnName, + } + p.Function = append(p.Function, function) + + location := &profile.Location{ + ID: locationID, + Mapping: m, + Line: []profile.Line{{Function: function}}, + } + p.Location = append(p.Location, location) + sample.Location = append(sample.Location, location) + + locationID++ + functionID++ + } + p.Sample = append(p.Sample, sample) + } + return p +} diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig index ba49e3c23491..fad895851e56 100644 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -1,5 +1,12 @@ root = true -[*] +[*.go] indent_style = tab indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 000000000000..32f1001be0a5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml index 981d1bb8132d..a9c30165cddf 100644 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -2,29 +2,35 @@ sudo: false language: go go: - - 1.8.x - - 1.9.x - - tip + - "stable" + - "1.11.x" + - "1.10.x" + - "1.9.x" matrix: + include: + - go: "stable" + env: GOLINT=true allow_failures: - go: tip fast_finish: true -before_script: - - go get -u github.com/golang/lint/golint + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi script: - - go test -v --race ./... + - go test --race ./... after_script: - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - test -z "$(golint ./... | tee /dev/stderr)" + - if [ ! 
-z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - go vet ./... os: - linux - osx + - windows notifications: email: false diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index f21e54080090..e180c8fb0599 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,5 +1,5 @@ Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 3993207413a7..b2629e5229ca 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -10,16 +10,16 @@ go get -u golang.org/x/sys/... Cross platform: Windows, Linux, BSD and macOS. -|Adapter |OS |Status | -|----------|----------|----------| -|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| -|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| -|fanotify |Linux 2.6.37+ | | -|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| -|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | \* Android and iOS are untested. @@ -33,6 +33,53 @@ All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based o Go 1.6 supports dependencies located in the `vendor/` folder. 
Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + ## Contributing Please refer to [CONTRIBUTING][] before opening an issue or pull request. @@ -65,6 +112,10 @@ There are OS-specific limits as to how many watches can be created: * Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. * BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. + [#62]: https://github.com/howeyc/fsnotify/issues/62 [#18]: https://github.com/fsnotify/fsnotify/issues/18 [#11]: https://github.com/fsnotify/fsnotify/issues/11 diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 190bf0de5756..89cab046d124 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -63,4 +63,6 @@ func (e Event) String() string { } // Common errors that can be reported by a watcher -var ErrEventOverflow = errors.New("fsnotify queue overflow") +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod new file mode 100644 index 000000000000..ff11e13f2240 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.mod @@ -0,0 +1,5 @@ +module github.com/fsnotify/fsnotify + +go 1.13 + +require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum new file mode 100644 index 000000000000..f60af9855da7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go index cc7db4b22ef5..b33f2b4d4b79 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -40,12 +40,12 @@ func newFdPoller(fd int) (*fdPoller, error) { poller.fd = fd // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(0) + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) if poller.epfd == -1 { return nil, errno } // Create pipe; pipe[0] is the read end, pipe[1] the write 
end. - errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) if errno != nil { return nil, errno } diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go index 7d8de14513ed..2306c4620bf6 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -8,4 +8,4 @@ package fsnotify import "golang.org/x/sys/unix" -const openMode = unix.O_NONBLOCK | unix.O_RDONLY +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go index 9139e17161bf..870c4d6d1845 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -9,4 +9,4 @@ package fsnotify import "golang.org/x/sys/unix" // note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore new file mode 100644 index 000000000000..b975a7b4c326 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/.gitignore @@ -0,0 +1,3 @@ +*.rdb +testdata/*/ +.idea/ diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml new file mode 100644 index 000000000000..2132eee96f2b --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml @@ -0,0 +1,21 @@ +run: + concurrency: 8 + deadline: 5m + tests: false +linters: + enable-all: true + disable: + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - goconst + - godox + - gosec + - maligned + - wsl + - gomnd + - goerr113 + - exhaustive + - gofumpt + - nestif diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc b/vendor/github.com/go-redis/redis/v8/.prettierrc new file mode 100644 index 000000000000..8b7f044ad1f5 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/.prettierrc @@ -0,0 +1,4 @@ +semi: false +singleQuote: true +proseWrap: always +printWidth: 100 diff --git a/vendor/github.com/go-redis/redis/v8/.travis.yml b/vendor/github.com/go-redis/redis/v8/.travis.yml new file mode 100644 index 000000000000..adedd8df4e46 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/.travis.yml @@ -0,0 +1,20 @@ +dist: xenial +language: go + +services: + - redis-server + +go: + - 1.14.x + - 1.15.x + - tip + +matrix: + allow_failures: + - go: tip + +go_import_path: github.com/go-redis/redis + +before_install: + - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- + -b $(go env GOPATH)/bin v1.28.3 diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md new file mode 100644 index 000000000000..30fd58641083 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md @@ -0,0 +1,85 @@ +# Changelog + +> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev) + +## v8 (unreleased) + +- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not + using `context.Context` yet, the simplest option is to define global package variable + `var ctx = context.TODO()` and use it when `ctx` is required. +- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`. 
+ +- Added `redisext.OpenTelemetryHook` that adds + [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/). + +- Ring uses Rendezvous Hashing by default, which provides better distribution. You need to move + existing keys to a new location or keys will be inaccessible / lost. To use the old hashing scheme: + +```go +import "github.com/golang/groupcache/consistenthash" + +ring := redis.NewRing(&redis.RingOptions{ + NewConsistentHash: func() { + return consistenthash.New(100, crc32.ChecksumIEEE) + }, +}) +``` + +## v7.3 + +- New option `Options.Username`, which causes the client to use `AuthACL`. Be aware of this if your connection + URL contains a username. + +## v7.2 + +- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users. + +## v7.1 + +- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` + interface. + +## v7 + +- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a + transactional pipeline. +- WrapProcess is replaced with more convenient AddHook that has access to context.Context. +- WithContext can no longer be used to create a shallow copy of the client. +- New methods ProcessContext, DoContext, and ExecContext. +- Client respects Context.Deadline when setting net.Conn deadline. +- Client listens on Context.Done while waiting for a connection from the pool and returns an error + when the context is cancelled. +- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow + detecting reconnections. +- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse + the time. +- `SetLimiter` is removed and added `Options.Limiter` instead. +- `HMSet` is deprecated as of Redis v4. + +## v6.15 + +- Cluster and Ring pipelines process commands for each node in its own goroutine. + +## v6.14 + +- Added Options.MinIdleConns. +- Added Options.MaxConnAge. +- PoolStats.FreeConns is renamed to PoolStats.IdleConns. +- Add Client.Do to simplify creating custom commands. +- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. +- Lower memory usage. + +## v6.13 + +- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set + `HashReplicas = 1000` for better key distribution between shards. +- Cluster client was optimized to use much less memory when reloading cluster state. +- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a timeout + occurs. In most cases it is recommended to use PubSub.Channel instead. +- Dialer.KeepAlive is set to 5 minutes by default. + +## v6.12 + +- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal Redis + servers that don't have cluster mode enabled. See + https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE new file mode 100644 index 000000000000..298bed9beaf7 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/go-redis/redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile new file mode 100644 index 000000000000..86609c6e07c9 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/Makefile @@ -0,0 +1,20 @@ +all: testdeps + go test ./... + go test ./... -short -race + go test ./... -run=NONE -bench=. -benchmem + env GOOS=linux GOARCH=386 go test ./... + golangci-lint run + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. -test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + cd $< && make all diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md new file mode 100644 index 000000000000..2eace5d95998 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/README.md @@ -0,0 +1,150 @@ +# Redis client for Golang + +[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) +[![Documentation](https://img.shields.io/badge/pg-documentation-informational)](https://redis.uptrace.dev/) + +> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev) + +- [Docs](https://redis.uptrace.dev) +- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) +- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) + +## Ecosystem + +- [redisext](https://github.com/go-redis/redisext) - tracing using OpenTelemetryHook. +- [Redis Cache](https://github.com/go-redis/cache). +- [Rate limiting](https://github.com/go-redis/redis_rate). +- [Distributed Locks](https://github.com/bsm/redislock). + +## Features + +- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC. +- Automatic connection pooling with + [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. +- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). +- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). +- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and + [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). 
+- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). +- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). +- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). +- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). +- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup) + without using cluster mode and Redis Sentinel. +- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). +- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation). + +API docs: https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc. Examples: +https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples. + +## Installation + +go-redis requires a Go version with [Modules](https://github.com/golang/go/wiki/Modules) support and +uses import versioning. So please make sure to initialize a Go module before installing go-redis: + +```shell +go mod init github.com/my/repo +go get github.com/go-redis/redis/v8 +``` + +Import: + +```go +import "github.com/go-redis/redis/v8" +``` + +## Quickstart + +```go +import ( + "context" + "fmt" + "github.com/go-redis/redis/v8" +) + +var ctx = context.Background() + +func ExampleNewClient() { + rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + }) + + pong, err := rdb.Ping(ctx).Result() + fmt.Println(pong, err) + // Output: PONG <nil> +} + +func ExampleClient() { + rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + }) + err := rdb.Set(ctx, "key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := rdb.Get(ctx, "key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := rdb.Get(ctx, "key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exist") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exist +} +``` + +## Howto + +Please go through [examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) +to get an idea of how to use this package. A minimal pipelining sketch follows below. 
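+
+As a minimal sketch of the same context-first API (assuming a reachable Redis at `localhost:6379`;
+the key name is illustrative), `Pipelined` queues several commands and sends them to the server in
+a single round trip:
+
+```go
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/go-redis/redis/v8"
+)
+
+func ExamplePipelined() {
+	ctx := context.Background()
+	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
+
+	// Both commands are buffered locally and flushed together by Pipelined.
+	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+		pipe.Incr(ctx, "pipeline_counter")
+		pipe.Expire(ctx, "pipeline_counter", time.Hour)
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	// The queued commands carry their replies once the pipeline has executed.
+	fmt.Println(cmds[0].(*redis.IntCmd).Val())
+}
+```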
+ +## Look and feel + +Some corner cases: + +```go +// SET key value EX 10 NX +set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result() + +// SORT list LIMIT 0 2 ASC +vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + +// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 +vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, +}).Result() + +// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM +vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{ + Keys: []string{"zset1", "zset2"}, + Weights: []int64{2, 3} +}).Result() + +// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" +vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() + +// custom command +res, err := rdb.Do(ctx, "set", "key", "value").Result() +``` + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang msgpack](https://github.com/vmihailenco/msgpack) +- [Golang message task queue](https://github.com/vmihailenco/taskq) diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go new file mode 100644 index 000000000000..346d675fd5fa --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/cluster.go @@ -0,0 +1,1694 @@ +package redis + +import ( + "context" + "crypto/tls" + "fmt" + "math" + "net" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/hashtag" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" + "golang.org/x/exp/rand" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // NewClient creates a cluster node client with provided name and options. + NewClient func(opt *Options) *Client + + // The maximum number of retries before giving up. Command is retried + // on network errors and MOVED/ASK redirects. + // Default is 8 retries. + MaxRedirects int + + // Enables read-only commands on slave nodes. + ReadOnly bool + // Allows routing read-only commands to the closest master or slave node. + // It automatically enables ReadOnly. + RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. + // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. + // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func() ([]ClusterSlot, error) + + // Following options are copied from Options struct. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + + OnConnect func(ctx context.Context, cn *Conn) error + + Username string + Password string + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolSize applies per cluster node and not for the whole cluster. 
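+ // As an illustration of that scope: with 10 cluster nodes and PoolSize 20, the client may hold up to 200 connections in total.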
+ PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 8 + } + + if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil { + opt.ReadOnly = true + } + + if opt.PoolSize == 0 { + opt.PoolSize = 5 * runtime.NumCPU() + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } + + if opt.NewClient == nil { + opt.NewClient = NewClient + } +} + +func (opt *ClusterOptions) clientOptions() *Options { + const disableIdleCheck = -1 + + return &Options{ + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + Username: opt.Username, + Password: opt.Password, + readOnly: opt.ReadOnly, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: disableIdleCheck, + + TLSConfig: opt.TLSConfig, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + failing uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: clOpt.NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const probes = 10 + + var latency uint32 + for i := 0; i < probes; i++ { + start := time.Now() + n.Client.Ping(context.TODO()) + probe := uint32(time.Since(start) / time.Microsecond) + latency = (latency + probe) / 2 + } + atomic.StoreUint32(&n.latency, latency) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsFailing() { + atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Failing() bool { + const timeout = 15 // 15 seconds + + failing := atomic.LoadUint32(&n.failing) + if failing == 0 { + return false + } + if time.Now().Unix()-int64(failing) < timeout { + return true + } + atomic.StoreUint32(&n.failing, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + 
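+// A sketch of the manual-setup mode described on ClusterOptions.ClusterSlots
+// (the addresses are placeholders): the callback covers the whole 0-16383
+// slot range with standalone servers, and the first address in each Nodes
+// list is treated as the master, the rest as slaves.
+//
+//	rdb := redis.NewClusterClient(&redis.ClusterOptions{
+//		ClusterSlots: func() ([]redis.ClusterSlot, error) {
+//			slots := []redis.ClusterSlot{{
+//				Start: 0,
+//				End:   16383,
+//				Nodes: []redis.ClusterNode{{Addr: ":7000"}, {Addr: ":7001"}},
+//			}}
+//			return slots, nil
+//		},
+//		ReadOnly:      true,
+//		RouteRandomly: true,
+//	})
+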
+//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + addrs []string + nodes map[string]*clusterNode + activeAddrs []string + closed bool + + _generation uint32 // atomic +} + +func newClusterNodes(opt *ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + addrs: opt.Addrs, + nodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.nodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.nodes = nil + c.activeAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + c.mu.RLock() + closed := c.closed + if !closed { + if len(c.activeAddrs) > 0 { + addrs = c.activeAddrs + } else { + addrs = c.addrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. +func (c *clusterNodes) GC(generation uint32) { + //nolint:prealloc + var collected []*clusterNode + + c.mu.Lock() + + c.activeAddrs = c.activeAddrs[:0] + for addr, node := range c.nodes { + if node.Generation() >= generation { + c.activeAddrs = append(c.activeAddrs, addr) + continue + } + + delete(c.nodes, addr) + collected = append(collected, node) + } + + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) Get(addr string) (*clusterNode, error) { + node, err := c.get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.nodes[addr] + if ok { + return node, nil + } + + node = newClusterNode(c.opt, addr) + + c.addrs = appendIfNotExists(c.addrs, addr) + c.nodes[addr] = node + + return node, nil +} + +func (c *clusterNodes) get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.nodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.nodes)) + for _, node := range c.nodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.Get(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin 
string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + originHost, _, _ := net.SplitHostPort(origin) + isLoopbackOrigin := isLoopback(originHost) + + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin { + addr = replaceLoopbackHost(addr, originHost) + } + + node, err := c.nodes.Get(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func replaceLoopbackHost(nodeAddr, originHost string) string { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return nodeAddr + } + + nodeIP := net.ParseIP(nodeHost) + if nodeIP == nil { + return nodeAddr + } + + if !nodeIP.IsLoopback() { + return nodeAddr + } + + // Use origin host which is not loopback and node port. + return net.JoinHostPort(originHost, nodePort) +} + +func isLoopback(host string) bool { + ip := net.ParseIP(host) + if ip == nil { + return true + } + return ip.IsLoopback() +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Failing() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Failing() { + return slave, nil + } + } + + // All slaves are loading - use master. 
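+		// (nodes[0] is the slot master; a node's failing mark expires after
+		// 15 seconds, so a recovered slave becomes eligible again)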
+ return nodes[0], nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + const threshold = time.Millisecond + + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Failing() { + continue + } + if node == nil || node.Latency()-n.Latency() > threshold { + node = n + } + } + if node != nil { + return node, nil + } + // If all nodes are failing - return random node + return c.nodes.Random() +} + +func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + n := rand.Intn(len(nodes)) + return nodes[n], nil +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func(ctx context.Context) (*clusterState, error) + + state atomic.Value + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) { + state, err := c.load(ctx) + if err != nil { + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload(ctx context.Context) { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer atomic.StoreUint32(&c.reloading, 0) + + _, err := c.Reload(ctx) + if err != nil { + return + } + time.Sleep(200 * time.Millisecond) + }() +} + +func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { + v := c.state.Load() + if v != nil { + state := v.(*clusterState) + if time.Since(state.createdAt) > 10*time.Second { + c.LazyReload(ctx) + } + return state, nil + } + return c.Reload(ctx) +} + +func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) { + state, err := c.Reload(ctx) + if err == nil { + return state, nil + } + return c.Get(ctx) +} + +//------------------------------------------------------------------------------ + +type clusterClient struct { + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder //nolint:structcheck + cmdsInfoCache *cmdsInfoCache //nolint:structcheck +} + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + *clusterClient + cmdable + hooks + ctx context.Context +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. 
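+//
+// A minimal sketch, assuming a three-node cluster listening on localhost:
+//
+//	rdb := redis.NewClusterClient(&redis.ClusterOptions{
+//		Addrs: []string{":7000", ":7001", ":7002"},
+//	})
+//	defer rdb.Close()
+//
+//	err := rdb.Set(context.Background(), "key", "value", 0).Err()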
+func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + clusterClient: &clusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + }, + ctx: context.Background(), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + c.cmdable = c.Process + + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +func (c *ClusterClient) Context() context.Context { + return c.ctx +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.cmdable = clone.Process + clone.hooks.lock() + clone.ctx = ctx + return &clone +} + +// Options returns read-only Options that were used to create the client. +func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +// ReloadState reloads cluster state. If available it calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState(ctx context.Context) error { + _, err := c.state.Reload(ctx) + return err +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.process) +} + +func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { + err := c._process(ctx, cmd) + if err != nil { + cmd.SetErr(err) + return err + } + return nil +} + +func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error { + cmdInfo := c.cmdInfo(cmd.Name()) + slot := c.cmdSlot(cmd) + + var node *clusterNode + var ask bool + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + if node == nil { + var err error + node, err = c.cmdNode(ctx, cmdInfo, slot) + if err != nil { + return err + } + } + + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(ctx, NewCmd(ctx, "asking")) + _ = pipe.Process(ctx, cmd) + _, lastErr = pipe.Exec(ctx) + _ = pipe.Close() + ask = false + } else { + lastErr = node.Client.Process(ctx, cmd) + } + + // If there is no error - we are done. + if lastErr == nil { + return nil + } + if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload(ctx) + } + node = nil + continue + } + + // If slave is loading - pick another node. + if c.opt.ReadOnly && isLoadingError(lastErr) { + node.MarkAsFailing() + node = nil + continue + } + + var moved bool + var addr string + moved, ask, addr = isMovedError(lastErr) + if moved || ask { + var err error + node, err = c.nodes.Get(addr) + if err != nil { + return err + } + continue + } + + if shouldRetry(lastErr, cmd.readTimeout() == nil) { + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try another node. + node.MarkAsFailing() + node = nil + continue + } + + return lastErr + } + return lastErr +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. 
+// It returns the first error if any. +func (c *ClusterClient) ForEachMaster( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachShard concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachShard( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. 
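+// The result is the field-wise sum of the pool stats of every known master
+// and slave node, so e.g. TotalConns counts connections to the whole cluster.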
+func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get(context.TODO()) + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots() + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + for _, idx := range rand.Perm(len(addrs)) { + addr := addrs[idx] + + node, err := c.nodes.Get(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots(ctx).Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + /* + * No node is connectable. It's possible that all nodes' IP has changed. + * Clear activeAddrs to let client be able to re-connect using the initial + * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), + * which might have chance to resolve domain name and get updated IP address. + */ + c.nodes.mu.Lock() + c.nodes.activeAddrs = nil + c.nodes.mu.Unlock() + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
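+// It runs in a goroutine started by NewClusterClient when
+// opt.IdleCheckFrequency > 0.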
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c._processPipeline) +} + +func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error { + cmdsMap := newCmdsMap() + err := c.mapCmdsByNode(ctx, cmdsMap, cmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap.m { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + err := c._processPipelineNode(ctx, node, cmds, failedCmds) + if err == nil { + return + } + if attempt < c.opt.MaxRedirects { + if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { + setCmdsErr(cmds, err) + } + } else { + setCmdsErr(cmds, err) + } + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error { + state, err := c.state.Get(ctx) + if err != nil { + return err + } + + if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) { + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + node, err := c.slotReadOnlyNode(state, slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil + } + + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + node, err := state.slotMasterNode(slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) _processPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) error { + return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return err + } + + return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) + }) + }) + }) +} + +func (c *ClusterClient) pipelineReadCmds( + ctx context.Context, + node *clusterNode, + rd *proto.Reader, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + for _, cmd := range cmds { + err := cmd.readReply(rd) + if err == nil { + continue + } + if c.checkMovedErr(ctx, 
cmd, err, failedCmds) { + continue + } + + if c.opt.ReadOnly && isLoadingError(err) { + node.MarkAsFailing() + return err + } + if isRedisError(err) { + continue + } + return err + } + return nil +} + +func (c *ClusterClient) checkMovedErr( + ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap, +) bool { + moved, ask, addr := isMovedError(err) + if !moved && !ask { + return false + } + + node, err := c.nodes.Get(addr) + if err != nil { + return false + } + + if moved { + c.state.LazyReload(ctx) + failedCmds.Add(node, cmd) + return true + } + + if ask { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + return true + } + + panic("not reached") +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline) +} + +func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error { + state, err := c.state.Get(ctx) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + err := c._processTxPipelineNode(ctx, node, cmds, failedCmds) + if err == nil { + return + } + if attempt < c.opt.MaxRedirects { + if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { + setCmdsErr(cmds, err) + } + } else { + setCmdsErr(cmds, err) + } + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + } + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) _processTxPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) error { + return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return err + } + + return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. 
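+				// (the first element is the MULTI command and the last is EXEC;
+				// the reslice keeps only the user's queued commands)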
+ cmds = cmds[1 : len(cmds)-1] + + err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds) + if err != nil { + moved, ask, addr := isMovedError(err) + if moved || ask { + return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds) + } + return err + } + + return pipelineReadCmds(rd, cmds) + }) + }) + }) +} + +func (c *ClusterClient) txPipelineReadQueued( + ctx context.Context, + rd *proto.Reader, + statusCmd *StatusCmd, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + // Parse queued replies. + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(rd) + if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { + continue + } + return err + } + + // Parse number of replies. + line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + return proto.ParseErrorReply(line) + case proto.ArrayReply: + // ok + default: + return fmt.Errorf("redis: expected '*', but got line %q", line) + } + + return nil +} + +func (c *ClusterClient) cmdsMoved( + ctx context.Context, cmds []Cmder, + moved, ask bool, + addr string, + failedCmds *cmdsMap, +) error { + node, err := c.nodes.Get(addr) + if err != nil { + return err + } + + if moved { + c.state.LazyReload(ctx) + for _, cmd := range cmds { + failedCmds.Add(node, cmd) + } + return nil + } + + if ask { + for _, cmd := range cmds { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + } + return nil + } + + return nil +} + +func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + err = node.Client.Watch(ctx, fn, keys...) + if err == nil { + break + } + + moved, ask, addr := isMovedError(err) + if moved || ask { + node, err = c.nodes.Get(addr) + if err != nil { + return err + } + continue + } + + if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload(ctx) + } + node, err = c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + continue + } + + if shouldRetry(err, true) { + continue + } + + return err + } + + return err +} + +func (c *ClusterClient) pubSub() *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + if node != nil { + panic("node != nil") + } + + var err error + if len(channels) > 0 { + slot := hashtag.Slot(channels[0]) + node, err = c.slotMasterNode(ctx, slot) + } else { + node, err = c.nodes.Random() + } + if err != nil { + return nil, err + } + + cn, err := node.Client.newConn(context.TODO()) + if err != nil { + node = nil + + return nil, err + } + + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + err := node.Client.connPool.CloseConn(cn) + node = nil + return err + }, + } + pubsub.init() + + return pubsub +} + +// Subscribe subscribes the client to the specified channels. 
+// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) { + // Try 3 random nodes. + const nodeLimit = 3 + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + perm := rand.Perm(len(addrs)) + if len(perm) > nodeLimit { + perm = perm[:nodeLimit] + } + + for _, idx := range perm { + addr := addrs[idx] + + node, err := c.nodes.Get(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + info, err := node.Client.Command(c.ctx).Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + + if firstErr == nil { + panic("not reached") + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get() + if err != nil { + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) + } + return info +} + +func (c *ClusterClient) cmdSlot(cmd Cmder) int { + args := cmd.Args() + if args[0] == "cluster" && args[1] == "getkeysinslot" { + return args[2].(int) + } + + cmdInfo := c.cmdInfo(cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdNode( + ctx context.Context, + cmdInfo *CommandInfo, + slot int, +) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + + if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly { + return c.slotReadOnlyNode(state, slot) + } + return state.slotMasterNode(slot) +} + +func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { + if c.opt.RouteByLatency { + return state.slotClosestNode(slot) + } + if c.opt.RouteRandomly { + return state.slotRandomNode(slot) + } + return state.slotSlaveNode(slot) +} + +func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + return state.slotMasterNode(slot) +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +//------------------------------------------------------------------------------ + +type cmdsMap struct { + mu sync.Mutex + m map[*clusterNode][]Cmder +} + +func newCmdsMap() *cmdsMap { + return &cmdsMap{ + m: 
make(map[*clusterNode][]Cmder), + } +} + +func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) { + m.mu.Lock() + m.m[node] = append(m.m[node], cmds...) + m.mu.Unlock() +} diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go new file mode 100644 index 000000000000..1f0bae067ae6 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go @@ -0,0 +1,25 @@ +package redis + +import ( + "context" + "sync/atomic" +) + +func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "dbsize") + var size int64 + err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { + n, err := master.DBSize(ctx).Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.SetErr(err) + return cmd + } + cmd.val = size + return cmd +} diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go new file mode 100644 index 000000000000..81e0c4db6f78 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/command.go @@ -0,0 +1,2169 @@ +package redis + +import ( + "context" + "fmt" + "net" + "strconv" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/proto" + "github.com/go-redis/redis/v8/internal/util" +) + +type Cmder interface { + Name() string + FullName() string + Args() []interface{} + String() string + stringArg(int) string + + readTimeout() *time.Duration + readReply(rd *proto.Reader) error + + SetErr(error) + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.SetErr(e) + } + } +} + +func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmds(wr *proto.Writer, cmds []Cmder) error { + for _, cmd := range cmds { + if err := writeCmd(wr, cmd); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmd Cmder) error { + return wr.WriteArgs(cmd.Args()) +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + case "memory": + // https://github.com/redis/redis/issues/7493 + if cmd.stringArg(1) == "usage" { + return 2 + } + } + + if info == nil { + return 0 + } + return int(info.FirstKeyPos) +} + +func cmdString(cmd Cmder, val interface{}) string { + b := make([]byte, 0, 64) + + for i, arg := range cmd.Args() { + if i > 0 { + b = append(b, ' ') + } + b = internal.AppendArg(b, arg) + } + + if err := cmd.Err(); err != nil { + b = append(b, ": "...) + b = append(b, err.Error()...) + } else if val != nil { + b = append(b, ": "...) + b = internal.AppendArg(b, val) + } + + return internal.String(b) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + ctx context.Context + args []interface{} + err error + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Name() string { + if len(cmd.args) == 0 { + return "" + } + // Cmd name must be lower cased. 
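+	// (the COMMAND info cache and the switches in FullName and
+	// cmdFirstKeyPos all key off lower-case names)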
+ return internal.ToLower(cmd.stringArg(0)) +} + +func (cmd *baseCmd) FullName() string { + switch name := cmd.Name(); name { + case "cluster", "command": + if len(cmd.args) == 1 { + return name + } + if s2, ok := cmd.args[1].(string); ok { + return name + " " + s2 + } + return name + default: + return name + } +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd.args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd.args) { + return "" + } + s, _ := cmd.args[pos].(string) + return s +} + +func (cmd *baseCmd) SetErr(e error) { + cmd.err = e +} + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(ctx context.Context, args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) Text() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + switch val := cmd.val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return float32(val), nil + case string: + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil + default: + err := fmt.Errorf("redis: unexpected type=%T for Float32", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) readReply(rd *proto.Reader) error { + 
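+	// Cmd is the generic command type, so accept any reply shape;
+	// sliceParser recursively decodes nested array replies.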
cmd.val, cmd.err = rd.ReadReply(sliceParser) + return cmd.err +} + +// sliceParser implements proto.MultiBulkParse. +func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, n) + for i := 0; i < len(vals); i++ { + v, err := rd.ReadReply(sliceParser) + if err != nil { + if err == Nil { + vals[i] = nil + continue + } + if err, ok := err.(proto.RedisError); ok { + vals[i] = err + continue + } + return nil, err + } + vals[i] = v + } + return vals, nil +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(sliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadString() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) Uint64() (uint64, error) { + return uint64(cmd.val), cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadIntReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type IntSliceCmd struct { + baseCmd + + val []int64 +} + +var _ Cmder = (*IntSliceCmd)(nil) + +func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { + return &IntSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntSliceCmd) Val() []int64 { + return cmd.val +} + +func (cmd *IntSliceCmd) Result() ([]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]int64, n) + for i := 0; i < len(cmd.val); i++ { + num, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.val[i] = num + } + return 
nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + precision: precision, + } +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + var n int64 + n, cmd.err = rd.ReadIntReply() + if cmd.err != nil { + return cmd.err + } + switch n { + // -2 if the key does not exist + // -1 if the key exists but has no associated expire + case -2, -1: + cmd.val = time.Duration(n) + default: + cmd.val = time.Duration(n) * cmd.precision + } + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d elements, expected 2", n) + } + + sec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + microsec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + cmd.val = time.Unix(sec, microsec*1000) + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. But + // `SETNX key value` returns bool (0/1). So convert nil to bool. 
+ if cmd.err == Nil { + cmd.val = false + cmd.err = nil + return nil + } + if cmd.err != nil { + return cmd.err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case string: + cmd.val = v == "OK" + return nil + default: + cmd.err = fmt.Errorf("got %T, wanted int64 or string", v) + return cmd.err + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + f, err := strconv.ParseFloat(cmd.Val(), 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Time() (time.Time, error) { + if cmd.err != nil { + return time.Time{}, cmd.err + } + return time.Parse(time.RFC3339Nano, cmd.Val()) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadString() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadFloatReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return 
proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]string, n) + for i := 0; i < len(cmd.val); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.val[i] = "" + case err != nil: + return nil, err + default: + cmd.val[i] = s + } + } + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]bool, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.val[i] = n == 1 + } + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val[key] = value + } + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]int64, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + cmd.val[key] = n + } + return nil, nil + }) + return 
cmd.err +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]struct{}, n) + for i := int64(0); i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + cmd.val[key] = struct{}{} + } + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { + return &XMessageSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(xMessageSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]XMessage) + return nil +} + +// xMessageSliceParser implements proto.MultiBulkParse. +func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + msgs := make([]XMessage, n) + for i := 0; i < len(msgs); i++ { + i := i + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + var values map[string]interface{} + + v, err := rd.ReadArrayReply(stringInterfaceMapParser) + if err != nil { + if err != proto.Nil { + return nil, err + } + } else { + values = v.(map[string]interface{}) + } + + msgs[i] = XMessage{ + ID: id, + Values: values, + } + return nil, nil + }) + if err != nil { + return nil, err + } + } + return msgs, nil +} + +// stringInterfaceMapParser implements proto.MultiBulkParse. 
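+// It reads n bulk strings and pairs them into a field -> value map, as used
+// for stream entry values.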
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]interface{}, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]XStream, n) + for i := 0; i < len(cmd.val); i++ { + i := i + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadString() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(xMessageSliceParser) + if err != nil { + return nil, err + } + + cmd.val[i] = XStream{ + Stream: stream, + Messages: v.([]XMessage), + } + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + +var _ Cmder = (*XPendingCmd)(nil) + +func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + count, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + lower, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + higher, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + cmd.val = &XPending{ + Count: count, + Lower: lower, + Higher: higher, + } + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + for i := int64(0); i < n; i++ { + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + consumerName, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumerPending, err := rd.ReadInt() + if err != nil { + return nil, err + } + + if cmd.val.Consumers == nil { + cmd.val.Consumers = make(map[string]int64) + } + cmd.val.Consumers[consumerName] = consumerPending + + return 
nil, nil
+				})
+				if err != nil {
+					return nil, err
+				}
+			}
+			return nil, nil
+		})
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+	ID         string
+	Consumer   string
+	Idle       time.Duration
+	RetryCount int64
+}
+
+type XPendingExtCmd struct {
+	baseCmd
+	val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+	return &XPendingExtCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+	return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]XPendingExt, 0, n)
+		for i := int64(0); i < n; i++ {
+			_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				if n != 4 {
+					return nil, fmt.Errorf("got %d, wanted 4", n)
+				}
+
+				id, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				consumer, err := rd.ReadString()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				idle, err := rd.ReadIntReply()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				retryCount, err := rd.ReadIntReply()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				cmd.val = append(cmd.val, XPendingExt{
+					ID:         id,
+					Consumer:   consumer,
+					Idle:       time.Duration(idle) * time.Millisecond,
+					RetryCount: retryCount,
+				})
+				return nil, nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+	baseCmd
+	val []XInfoGroups
+}
+
+type XInfoGroups struct {
+	Name            string
+	Consumers       int64
+	Pending         int64
+	LastDeliveredID string
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+	return &XInfoGroupsCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "groups", stream},
+		},
+	}
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroups {
+	return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroups, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+	_, cmd.err = rd.ReadArrayReply(
+		func(rd *proto.Reader, n int64) (interface{}, error) {
+			for i := int64(0); i < n; i++ {
+				v, err := rd.ReadReply(xGroupInfoParser)
+				if err != nil {
+					return nil, err
+				}
+				cmd.val = append(cmd.val, v.(XInfoGroups))
+			}
+			return nil, nil
+		})
+	return cmd.err
+}
+
+func xGroupInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+	if n != 8 {
+		return nil, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, "+
+			"wanted 8", n)
+	}
+	var (
+		err error
+		grp XInfoGroups
+		key string
+		val string
+	)
+
+	for i := 0; i < 4; i++ {
+		key, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		val, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		switch key {
+		case "name":
+			grp.Name = val
+		case "consumers":
+			grp.Consumers, err = strconv.ParseInt(val, 0, 64)
+		case "pending":
+			grp.Pending, 
err = strconv.ParseInt(val, 0, 64) + case "last-delivered-id": + grp.LastDeliveredID = val + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO GROUPS reply", key) + } + if err != nil { + return nil, err + } + } + return grp, err +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]Z, n/2) + for i := 0; i < len(cmd.val); i++ { + member, err := rd.ReadString() + if err != nil { + return nil, err + } + + score, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + cmd.val[i] = Z{ + Member: member, + Score: score, + } + } + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type ZWithKeyCmd struct { + baseCmd + + val *ZWithKey +} + +var _ Cmder = (*ZWithKeyCmd)(nil) + +func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { + return &ZWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZWithKeyCmd) Val() *ZWithKey { + return cmd.val +} + +func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ZWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 3 { + return nil, fmt.Errorf("got %d elements, expected 3", n) + } + + cmd.val = &ZWithKey{} + var err error + + cmd.val.Key, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process cmdable +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + process: process, + } +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) error { + cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply() + return cmd.err +} + +// Iterator creates a new ScanIterator. 
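+//
+// A minimal usage sketch (editor's illustration: rdb is a hypothetical
+// *Client and the match pattern is made up):
+//
+//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//	for iter.Next(ctx) {
+//		fmt.Println("key:", iter.Val())
+//	}
+//	if err := iter.Err(); err != nil {
+//		panic(err)
+//	}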
+func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + ID string + Addr string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]ClusterSlot, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 2 { + err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + return nil, err + } + + start, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + end, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + nodes := make([]ClusterNode, n-2) + for j := 0; j < len(nodes); j++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 && n != 3 { + err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) + return nil, err + } + + ip, err := rd.ReadString() + if err != nil { + return nil, err + } + + port, err := rd.ReadString() + if err != nil { + return nil, err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if n == 3 { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + nodes[j].ID = id + } + } + + cmd.val[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. 
+ Sort string + Store string + StoreDist string +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + return &GeoLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: geoLocationArgs(q, args...), + }, + q: q, + } +} + +func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithGeoHash { + args = append(args, "withhash") + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return args +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) + if cmd.err != nil { + return cmd.err + } + cmd.locations = v.([]GeoLocation) + return nil +} + +func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + locs := make([]GeoLocation, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(newGeoLocationParser(q)) + if err != nil { + return nil, err + } + switch vv := v.(type) { + case string: + locs = append(locs, GeoLocation{ + Name: vv, + }) + case *GeoLocation: + // TODO: avoid copying + locs = append(locs, *vv) + default: + return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) + } + } + return locs, nil + } +} + +func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + var loc GeoLocation + var err error + + loc.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + if q.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + if q.WithGeoHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + } + if q.WithCoord { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 { + return nil, fmt.Errorf("got %d coordinates, expected 2", n) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + + return &loc, nil + } +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + baseCmd + + val []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.val +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), 
cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]*GeoPos, n) + for i := 0; i < len(cmd.val); i++ { + i := i + _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + longitude, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + latitude, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + cmd.val[i] = &GeoPos{ + Longitude: longitude, + Latitude: latitude, + } + return nil, nil + }) + if err != nil { + if err == Nil { + cmd.val[i] = nil + continue + } + return nil, err + } + } + return nil, nil + }) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + ACLFlags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]*CommandInfo, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(commandInfoParser) + if err != nil { + return nil, err + } + vv := v.(*CommandInfo) + cmd.val[vv.Name] = vv + } + return nil, nil + }) + return cmd.err +} + +func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + const numArgRedis5 = 6 + const numArgRedis6 = 7 + + switch n { + case numArgRedis5, numArgRedis6: + // continue + default: + return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n) + } + + var cmd CommandInfo + var err error + + cmd.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + + arity, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.Arity = int8(arity) + + _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.Flags = make([]string, n) + for i := 0; i < len(cmd.Flags); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.Flags[i] = "" + case err != nil: + return nil, err + default: + cmd.Flags[i] = s + } + } + return nil, nil + }) + if err != nil { + return nil, err + } + + firstKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.StepCount = int8(stepCount) + + for _, flag := range cmd.Flags { + if flag == "readonly" { + cmd.ReadOnly = true + break + } + } + + if n == numArgRedis5 { + return &cmd, nil + } + + _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.ACLFlags = make([]string, n) + for i := 0; i < 
len(cmd.ACLFlags); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.ACLFlags[i] = "" + case err != nil: + return nil, err + default: + cmd.ACLFlags[i] = s + } + } + return nil, nil + }) + if err != nil { + return nil, err + } + + return &cmd, nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func() (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache { + return &cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn() + if err != nil { + return err + } + + // Extensions have cmd names in upper case. Convert them to lower case. + for k, v := range cmds { + lower := internal.ToLower(k) + if lower != k { + cmds[lower] = v + } + } + + c.cmds = cmds + return nil + }) + return c.cmds, err +} diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go new file mode 100644 index 000000000000..46b88eb9c136 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/commands.go @@ -0,0 +1,2688 @@ +package redis + +import ( + "context" + "errors" + "io" + "time" + + "github.com/go-redis/redis/v8/internal" +) + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1ms", + dur, time.Millisecond, + ) + return 1 + } + return int64(dur / time.Millisecond) +} + +func formatSec(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1s", + dur, time.Second, + ) + return 1 + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + return appendArg(dst, src[0]) + } + + dst = append(dst, src...) + return dst +} + +func appendArg(dst []interface{}, arg interface{}) []interface{} { + switch arg := arg.(type) { + case []string: + for _, s := range arg { + dst = append(dst, s) + } + return dst + case []interface{}: + dst = append(dst, arg...) 
+ return dst + case map[string]interface{}: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + default: + return append(dst, arg) + } +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command(ctx context.Context) *CommandsInfoCmd + ClientGetName(ctx context.Context) *StringCmd + Echo(ctx context.Context, message interface{}) *StringCmd + Ping(ctx context.Context) *StatusCmd + Quit(ctx context.Context) *StatusCmd + Del(ctx context.Context, keys ...string) *IntCmd + Unlink(ctx context.Context, keys ...string) *IntCmd + Dump(ctx context.Context, key string) *StringCmd + Exists(ctx context.Context, keys ...string) *IntCmd + Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + Keys(ctx context.Context, pattern string) *StringSliceCmd + Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd + Move(ctx context.Context, key string, db int) *BoolCmd + ObjectRefCount(ctx context.Context, key string) *IntCmd + ObjectEncoding(ctx context.Context, key string) *StringCmd + ObjectIdleTime(ctx context.Context, key string) *DurationCmd + Persist(ctx context.Context, key string) *BoolCmd + PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + PTTL(ctx context.Context, key string) *DurationCmd + RandomKey(ctx context.Context) *StringCmd + Rename(ctx context.Context, key, newkey string) *StatusCmd + RenameNX(ctx context.Context, key, newkey string) *BoolCmd + Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd + SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd + SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd + Touch(ctx context.Context, keys ...string) *IntCmd + TTL(ctx context.Context, key string) *DurationCmd + Type(ctx context.Context, key string) *StatusCmd + Append(ctx context.Context, key, value string) *IntCmd + Decr(ctx context.Context, key string) *IntCmd + DecrBy(ctx context.Context, key string, decrement int64) *IntCmd + Get(ctx context.Context, key string) *StringCmd + GetRange(ctx context.Context, key string, start, end int64) *StringCmd + GetSet(ctx context.Context, key string, value interface{}) *StringCmd + Incr(ctx context.Context, key string) *IntCmd + IncrBy(ctx context.Context, key string, value int64) *IntCmd + IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd + MGet(ctx context.Context, keys ...string) *SliceCmd + MSet(ctx context.Context, values ...interface{}) *StatusCmd + MSetNX(ctx context.Context, values ...interface{}) *BoolCmd + Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd + StrLen(ctx context.Context, key string) *IntCmd + + GetBit(ctx context.Context, key string, offset int64) *IntCmd + 
SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd + BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd + BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpNot(ctx context.Context, destKey string, key string) *IntCmd + BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd + BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd + + Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd + SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + + HDel(ctx context.Context, key string, fields ...string) *IntCmd + HExists(ctx context.Context, key, field string) *BoolCmd + HGet(ctx context.Context, key, field string) *StringCmd + HGetAll(ctx context.Context, key string) *StringStringMapCmd + HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd + HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd + HKeys(ctx context.Context, key string) *StringSliceCmd + HLen(ctx context.Context, key string) *IntCmd + HMGet(ctx context.Context, key string, fields ...string) *SliceCmd + HSet(ctx context.Context, key string, values ...interface{}) *IntCmd + HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd + HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd + HVals(ctx context.Context, key string) *StringSliceCmd + + BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd + LIndex(ctx context.Context, key string, index int64) *StringCmd + LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LLen(ctx context.Context, key string) *IntCmd + LPop(ctx context.Context, key string) *StringCmd + LPush(ctx context.Context, key string, values ...interface{}) *IntCmd + LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd + LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd + LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd + RPop(ctx context.Context, key string) *StringCmd + RPopLPush(ctx context.Context, source, destination string) *StringCmd + RPush(ctx context.Context, key string, values ...interface{}) *IntCmd + RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + + SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd + SCard(ctx context.Context, key string) *IntCmd + SDiff(ctx context.Context, keys ...string) *StringSliceCmd + SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + SInter(ctx context.Context, keys ...string) *StringSliceCmd + SInterStore(ctx context.Context, 
destination string, keys ...string) *IntCmd + SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd + SMembers(ctx context.Context, key string) *StringSliceCmd + SMembersMap(ctx context.Context, key string) *StringStructMapCmd + SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd + SPop(ctx context.Context, key string) *StringCmd + SPopN(ctx context.Context, key string, count int64) *StringSliceCmd + SRandMember(ctx context.Context, key string) *StringCmd + SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd + SRem(ctx context.Context, key string, members ...interface{}) *IntCmd + SUnion(ctx context.Context, keys ...string) *StringSliceCmd + SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd + + XAdd(ctx context.Context, a *XAddArgs) *StringCmd + XDel(ctx context.Context, stream string, ids ...string) *IntCmd + XLen(ctx context.Context, stream string) *IntCmd + XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd + XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd + XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd + XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd + XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd + XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd + XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd + XGroupDestroy(ctx context.Context, stream, group string) *IntCmd + XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd + XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd + XPending(ctx context.Context, stream, group string) *XPendingCmd + XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd + XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd + XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd + XTrim(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd + XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd + + BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZIncr(ctx context.Context, key string, member *Z) *FloatCmd + ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd + ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd + ZCard(ctx context.Context, key string) *IntCmd + ZCount(ctx context.Context, key, min, max string) *IntCmd + ZLexCount(ctx context.Context, key, min, max string) *IntCmd + ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd + ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd + ZPopMax(ctx 
context.Context, key string, count ...int64) *ZSliceCmd + ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRank(ctx context.Context, key, member string) *IntCmd + ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd + ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd + ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd + ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd + ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRevRank(ctx context.Context, key, member string) *IntCmd + ZScore(ctx context.Context, key, member string) *FloatCmd + ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd + + PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd + PFCount(ctx context.Context, keys ...string) *IntCmd + PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd + + BgRewriteAOF(ctx context.Context) *StatusCmd + BgSave(ctx context.Context) *StatusCmd + ClientKill(ctx context.Context, ipPort string) *StatusCmd + ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd + ClientList(ctx context.Context) *StringCmd + ClientPause(ctx context.Context, dur time.Duration) *BoolCmd + ClientID(ctx context.Context) *IntCmd + ConfigGet(ctx context.Context, parameter string) *SliceCmd + ConfigResetStat(ctx context.Context) *StatusCmd + ConfigSet(ctx context.Context, parameter, value string) *StatusCmd + ConfigRewrite(ctx context.Context) *StatusCmd + DBSize(ctx context.Context) *IntCmd + FlushAll(ctx context.Context) *StatusCmd + FlushAllAsync(ctx context.Context) *StatusCmd + FlushDB(ctx context.Context) *StatusCmd + FlushDBAsync(ctx context.Context) *StatusCmd + Info(ctx context.Context, section ...string) *StringCmd + LastSave(ctx context.Context) *IntCmd + Save(ctx context.Context) *StatusCmd + Shutdown(ctx context.Context) *StatusCmd + ShutdownSave(ctx context.Context) *StatusCmd + ShutdownNoSave(ctx context.Context) *StatusCmd + SlaveOf(ctx context.Context, host, port string) *StatusCmd + Time(ctx context.Context) *TimeCmd + DebugObject(ctx context.Context, key string) *StringCmd + ReadOnly(ctx context.Context) *StatusCmd + ReadWrite(ctx context.Context) *StatusCmd + MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd + + Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd + ScriptFlush(ctx context.Context) *StatusCmd + ScriptKill(ctx context.Context) *StatusCmd + ScriptLoad(ctx context.Context, script string) *StringCmd + + Publish(ctx context.Context, channel string, message interface{}) *IntCmd + 
PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+	PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
+	PubSubNumPat(ctx context.Context) *IntCmd
+
+	ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+	ClusterNodes(ctx context.Context) *StringCmd
+	ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+	ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+	ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+	ClusterResetSoft(ctx context.Context) *StatusCmd
+	ClusterResetHard(ctx context.Context) *StatusCmd
+	ClusterInfo(ctx context.Context) *StringCmd
+	ClusterKeySlot(ctx context.Context, key string) *IntCmd
+	ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+	ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+	ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+	ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+	ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+	ClusterSaveConfig(ctx context.Context) *StatusCmd
+	ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+	ClusterFailover(ctx context.Context) *StatusCmd
+	ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+	ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+
+	GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+	GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+	GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+	GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+	GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+	GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+type StatefulCmdable interface {
+	Cmdable
+	Auth(ctx context.Context, password string) *StatusCmd
+	AuthACL(ctx context.Context, username, password string) *StatusCmd
+	Select(ctx context.Context, index int) *StatusCmd
+	SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+	ClientSetName(ctx context.Context, name string) *BoolCmd
+}
+
+var (
+	_ Cmdable = (*Client)(nil)
+	_ Cmdable = (*Tx)(nil)
+	_ Cmdable = (*Ring)(nil)
+	_ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "auth", password)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// Use it to authenticate the current connection as one of the users defined
+// in the ACL list when connecting to a Redis 6.0 (or greater) instance that
+// uses the Redis ACL system.
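+//
+// A hedged usage sketch (editor's illustration: it assumes a stateful
+// connection such as one returned by Client.Conn, and the credentials are
+// hypothetical; most callers set Options.Username/Options.Password instead
+// of calling this directly):
+//
+//	conn := rdb.Conn(ctx)
+//	err := conn.AuthACL(ctx, "myuser", "mypass").Err()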
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd { + cmd := NewStatusCmd(ctx, "auth", username, password) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd { + cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond)) + _ = c(ctx, cmd) + return cmd +} + +func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd { + cmd := NewStatusCmd(ctx, "select", index) + _ = c(ctx, cmd) + return cmd +} + +func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd { + cmd := NewStatusCmd(ctx, "swapdb", index1, index2) + _ = c(ctx, cmd) + return cmd +} + +// ClientSetName assigns a name to the connection. +func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd { + cmd := NewBoolCmd(ctx, "client", "setname", name) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { + cmd := NewCommandsInfoCmd(ctx, "command") + _ = c(ctx, cmd) + return cmd +} + +// ClientGetName returns the name of the connection. +func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "client", "getname") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "echo", message) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Ping(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "ping") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Quit(ctx context.Context) *StatusCmd { + panic("not implemented") +} + +func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Dump(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "dump", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) 
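+	// Dispatch through the underlying cmdable func; any error it encounters
+	// is stored on cmd and read back via cmd.Err() or cmd.Result().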
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd(ctx, "expire", key, formatSec(ctx, expiration)) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix()) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "keys", pattern) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "migrate", + host, + port, + key, + db, + formatMs(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd { + cmd := NewBoolCmd(ctx, "move", key, db) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "object", "refcount", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "object", "encoding", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd { + cmd := NewBoolCmd(ctx, "persist", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration)) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + ctx, + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RandomKey(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "randomkey") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd { + cmd := NewStatusCmd(ctx, "rename", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd { + cmd := NewBoolCmd(ctx, "renamenx", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + "replace", + ) + _ = c(ctx, cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(key string) []interface{} { + args := []interface{}{"sort", key} + if sort.By != "" { + args = append(args, "by", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + 
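+	// Each GET pattern is emitted as its own "get <pattern>" pair below.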
for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args +} + +func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, sort.args(key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { + args := sort.args(key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(ctx, sort.args(key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "ttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Type(ctx context.Context, key string) *StatusCmd { + cmd := NewStatusCmd(ctx, "type", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd { + cmd := NewIntCmd(ctx, "append", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Decr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "decr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd { + cmd := NewIntCmd(ctx, "decrby", key, decrement) + _ = c(ctx, cmd) + return cmd +} + +// Redis `GET key` command. It returns redis.Nil error when key does not exist. +func (c cmdable) Get(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "get", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd { + cmd := NewStringCmd(ctx, "getrange", key, start, end) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "getset", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Incr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "incr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd { + cmd := NewIntCmd(ctx, "incrby", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "incrbyfloat", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(ctx, args...) 
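+	// MGET reports missing keys as nil entries in the reply, so the resulting
+	// []interface{} has one element per requested key.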
+ _ = c(ctx, cmd) + return cmd +} + +// MSet is like Set but accepts multiple values: +// - MSet("key1", "value1", "key2", "value2") +// - MSet([]string{"key1", "value1", "key2", "value2"}) +// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) +func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "mset" + args = appendArgs(args, values) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSetNX is like SetNX but accepts multiple values: +// - MSetNX("key1", "value1", "key2", "value2") +// - MSetNX([]string{"key1", "value1", "key2", "value2"}) +// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) +func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "msetnx" + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Redis `SET key value [expiration]` command. +// +// Use expiration for `SETEX`-like behavior. +// Zero expiration means the key has no expiration time. +func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 5) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Redis `SET key value [expiration] NX` command. +// +// Zero expiration means the key has no expiration time. +func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + // Use old `SETNX` to support old Redis versions. + cmd = NewBoolCmd(ctx, "setnx", key, value) + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx") + } else { + cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx") + } + } + _ = c(ctx, cmd) + return cmd +} + +// Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. 
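+//
+// A short sketch (editor's illustration: key, value, and TTL are made up):
+//
+//	// Update the key only if it already exists, with a 10 minute TTL.
+//	ok, err := rdb.SetXX(ctx, "session:42", "token", 10*time.Minute).Result()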
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + cmd = NewBoolCmd(ctx, "set", key, value, "xx") + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx") + } else { + cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx") + } + } + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd(ctx, "setrange", key, offset, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "strlen", key) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { + cmd := NewIntCmd(ctx, "getbit", key, offset) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + ctx, + "setbit", + key, + offset, + value, + ) + _ = c(ctx, cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "and", destKey, keys...) +} + +func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "or", destKey, keys...) +} + +func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "xor", destKey, keys...) +} + +func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { + return c.bitOp(ctx, "not", destKey, key) +} + +func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { + a := make([]interface{}, 0, 2+len(args)) + a = append(a, "bitfield") + a = append(a, key) + a = append(a, args...) + cmd := NewIntSliceCmd(ctx, a...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) 
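+	// c is handed to the ScanCmd so that Iterator() can keep issuing SCAN
+	// calls for the remaining cursor pages.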
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"zscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hdel" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd { + cmd := NewBoolCmd(ctx, "hexists", key, field) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd { + cmd := NewStringCmd(ctx, "hget", key, field) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd(ctx, "hgetall", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd { + cmd := NewIntCmd(ctx, "hincrby", key, field, incr) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hkeys", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "hlen", key) + _ = c(ctx, cmd) + return cmd +} + +// HMGet returns the values for the specified fields in the hash stored at key. +// It returns an interface{} to distinguish between empty string and nil value. +func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hmget" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HSet accepts values in following formats: +// - HSet("myhash", "key1", "value1", "key2", "value2") +// - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) +// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) +// +// Note that it requires Redis v4 for multiple field/value pairs support. 
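+//
+// For example (editor's illustration: the hash name and fields are made up):
+//
+//	n, err := rdb.HSet(ctx, "user:1", map[string]interface{}{
+//		"name": "alice",
+//		"age":  30,
+//	}).Result()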
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hset" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HMSet is a deprecated version of HSet left for compatibility with Redis 3. +func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hmset" + args[1] = key + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "hsetnx", key, field, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hvals", key) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + ctx, + "brpoplpush", + source, + destination, + formatSec(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { + cmd := NewStringCmd(ctx, "lindex", key, index) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "llen", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "lpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) 
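+	// appendArgs above expands a single []string, []interface{}, or map
+	// argument into individual LPUSH arguments.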
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "lpushx"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(
+		ctx,
+		"lrange",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "lrem", key, count, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "lset", key, index, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"ltrim",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "rpop", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+	cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpushx"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "sadd"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "scard", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sdiff"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sdiffstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sinter"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sinterstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "sismember", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "smembers", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+	cmd := NewStringStructMapCmd(ctx, "smembers", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "spop", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "spop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "srandmember", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "srem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sunion"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sunionstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+//   - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+//   - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+//   - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of key-value pairs.
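+//
+// For illustration only (rdb and ctx are assumed, not defined here):
+//
+//	id, err := rdb.XAdd(ctx, &XAddArgs{
+//		Stream: "mystream",
+//		Values: map[string]interface{}{"key1": "value1"},
+//	}).Result()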
+type XAddArgs struct {
+	Stream       string
+	MaxLen       int64 // MAXLEN N
+	MaxLenApprox int64 // MAXLEN ~ N
+	ID           string
+	Values       interface{}
+}
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+	args := make([]interface{}, 0, 8)
+	args = append(args, "xadd")
+	args = append(args, a.Stream)
+	if a.MaxLen > 0 {
+		args = append(args, "maxlen", a.MaxLen)
+	} else if a.MaxLenApprox > 0 {
+		args = append(args, "maxlen", "~", a.MaxLenApprox)
+	}
+	if a.ID != "" {
+		args = append(args, a.ID)
+	} else {
+		args = append(args, "*")
+	}
+	args = appendArg(args, a.Values)
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+	args := []interface{}{"xdel", stream}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xlen", stream)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XReadArgs struct {
+	Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+	Count   int64
+	Block   time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+	args := make([]interface{}, 0, 5+len(a.Streams))
+	args = append(args, "xread")
+	if a.Count > 0 {
+		args = append(args, "count")
+		args = append(args, a.Count)
+	}
+	if a.Block >= 0 {
+		args = append(args, "block")
+		args = append(args, int64(a.Block/time.Millisecond))
+	}
+
+	args = append(args, "streams")
+	for _, s := range a.Streams {
+		args = append(args, s)
+	}
+
+	cmd := NewXStreamSliceCmd(ctx, args...)
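+	// When BLOCK was sent above (a.Block >= 0), the connection's read
+	// timeout must be stretched to cover the server-side blocking window;
+	// a negative Block means a plain, non-blocking XREAD.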
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+	return c.XRead(ctx, &XReadArgs{
+		Streams: streams,
+		Block:   -1,
+	})
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XReadGroupArgs struct {
+	Group    string
+	Consumer string
+	Streams  []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+	Count    int64
+	Block    time.Duration
+	NoAck    bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+	args := make([]interface{}, 0, 8+len(a.Streams))
+	args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+	}
+	if a.Block >= 0 {
+		args = append(args, "block", int64(a.Block/time.Millisecond))
+	}
+	if a.NoAck {
+		args = append(args, "noack")
+	}
+	args = append(args, "streams")
+	for _, s := range a.Streams {
+		args = append(args, s)
+	}
+
+	cmd := NewXStreamSliceCmd(ctx, args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+	args := []interface{}{"xack", stream, group}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+	cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XPendingExtArgs struct {
+	Stream   string
+	Group    string
+	Start    string
+	End      string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+	args := make([]interface{}, 0, 7)
+	args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+	if a.Consumer != "" {
+		args = append(args, a.Consumer)
+	}
+	cmd := NewXPendingExtCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XClaimArgs struct {
+	Stream   string
+	Group    string
+	Consumer string
+	MinIdle  time.Duration
+	Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+	args := xClaimArgs(a)
+	cmd := NewXMessageSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+	args := xClaimArgs(a)
+	args = append(args, "justid")
+	cmd := NewStringSliceCmd(ctx, args...)
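+	// The JUSTID modifier appended above makes the server return only the
+	// claimed message IDs, without their field/value payloads, hence the
+	// StringSliceCmd result type.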
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 4+len(a.Messages))
+	args = append(args,
+		"xclaim",
+		a.Stream,
+		a.Group, a.Consumer,
+		int64(a.MinIdle/time.Millisecond))
+	for _, id := range a.Messages {
+		args = append(args, id)
+	}
+	return args
+}
+
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", maxLen)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", "~", maxLen)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+	cmd := NewXInfoGroupsCmd(ctx, key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+	Score  float64
+	Member interface{}
+}
+
+// ZWithKey represents a sorted set member including the name of the key where it was popped.
+type ZWithKey struct {
+	Z
+	Key string
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+	Keys    []string
+	Weights []float64
+	// Can be SUM, MIN or MAX.
+	Aggregate string
+}
+
+// Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmax"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(ctx, timeout)
+	cmd := NewZWithKeyCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmin"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(ctx, timeout)
+	cmd := NewZWithKeyCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) zAdd(ctx context.Context, a []interface{}, n int, members ...*Z) *IntCmd {
+	for i, m := range members {
+		a[n+2*i] = m.Score
+		a[n+2*i+1] = m.Member
+	}
+	cmd := NewIntCmd(ctx, a...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
+	const n = 2
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1] = "zadd", key
+	return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "nx"
+	return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "xx"
+	return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	const n = 3
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2] = "zadd", key, "ch"
+	return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	const n = 4
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+	return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	const n = 4
+	a := make([]interface{}, n+2*len(members))
+	a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+	return c.zAdd(ctx, a, n, members...)
+}
+
+func (c cmdable) zIncr(ctx context.Context, a []interface{}, n int, members ...*Z) *FloatCmd {
+	for i, m := range members {
+		a[n+2*i] = m.Score
+		a[n+2*i+1] = m.Member
+	}
+	cmd := NewFloatCmd(ctx, a...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
+	const n = 3
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2] = "zadd", key, "incr"
+	return c.zIncr(ctx, a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
+	const n = 4
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+	return c.zIncr(ctx, a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
+	const n = 4
+	a := make([]interface{}, n+2)
+	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+	return c.zIncr(ctx, a, n, member)
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zcard", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zcount", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+	args := make([]interface{}, 3+len(store.Keys))
+	args[0] = "zinterstore"
+	args[1] = destination
+	args[2] = len(store.Keys)
+	for i, key := range store.Keys {
+		args[3+i] = key
+	}
+	if len(store.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range store.Weights {
+			args = append(args, weight)
+		}
+	}
+	if store.Aggregate != "" {
+		args = append(args, "aggregate", store.Aggregate)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmax",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmin",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) zRange(ctx context.Context, key string, start, stop int64, withScores bool) *StringSliceCmd {
+	args := []interface{}{
+		"zrange",
+		key,
+		start,
+		stop,
+	}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	return c.zRange(ctx, key, start, stop, false)
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd(ctx, "zrange", key, start, stop, "withscores")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type ZRangeBy struct {
+	Min, Max      string
+	Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Min, opt.Max}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zrank", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "zrem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+	cmd := NewIntCmd(
+		ctx,
+		"zremrangebyrank",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Max, opt.Min}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zrevrank", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "zscore", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+	args := make([]interface{}, 3+len(store.Keys))
+	args[0] = "zunionstore"
+	args[1] = dest
+	args[2] = len(store.Keys)
+	for i, key := range store.Keys {
+		args[3+i] = key
+	}
+	if len(store.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range store.Weights {
+			args = append(args, weight)
+		}
+	}
+	if store.Aggregate != "" {
+		args = append(args, "aggregate", store.Aggregate)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(els))
+	args[0] = "pfadd"
+	args[1] = key
+	args = appendArgs(args, els)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "pfcount"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "pfmerge"
+	args[1] = dest
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgrewriteaof")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgsave")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientKillByFilter uses the new-style syntax, while ClientKill uses the old one.
+//
+// CLIENT KILL