From 189dd65a6b99b3e327477048a2a424c29a4b9cea Mon Sep 17 00:00:00 2001 From: Robby Milo Date: Mon, 20 Jul 2020 12:35:16 +0200 Subject: [PATCH 1/5] update docs location --- docs/Makefile | 11 ++ docs/README.md | 94 +++++++------- docs/best-practices/README.md | 3 - docs/clients/promtail/stages/README.md | 29 ----- docs/community/README.md | 5 - docs/getting-started/README.md | 7 -- docs/maintaining/README.md | 6 - docs/operations/README.md | 11 -- docs/sources/.DS_Store | Bin 0 -> 10244 bytes docs/sources/_index.md | 19 +++ docs/{api.md => sources/api/_index.md} | 118 ++++++++++++------ .../architecture/_index.md} | 10 +- .../architecture}/modes_of_operation.png | Bin docs/sources/best-practices/_index.md | 7 ++ .../best-practices/current-best-practices.md | 9 +- docs/{ => sources}/chunks_diagram.png | Bin .../README.md => sources/clients/_index.md} | 12 +- docs/sources/clients/aws/_index.md | 3 + .../clients/aws/ec2/_index.md} | 29 +++-- .../aws/ec2/promtail-ec2-discovery.png | Bin .../clients/aws/ec2/promtail-ec2-final.yaml | 0 .../clients/aws/ec2/promtail-ec2-logs.png | Bin .../clients/aws/ec2/promtail-ec2.yaml | 0 .../clients/aws/ec2/promtail.service | 0 .../clients/aws/ecs/_index.md} | 11 +- .../clients/aws/ecs/ecs-grafana.png | Bin .../clients/aws/ecs/ecs-role.json | 0 .../clients/aws/ecs/ecs-task.json | 0 .../clients/aws/eks/_index.md} | 18 +-- .../clients/aws/eks/eventrouter.yaml | 0 .../clients/aws/eks/namespace-grafana.png | Bin .../{ => sources}/clients/aws/eks/values.yaml | 0 .../clients/docker-driver/_index.md} | 9 +- .../clients/docker-driver/configuration.md | 5 +- .../clients/fluentd/_index.md} | 9 +- .../clients/promtail/_index.md} | 15 ++- .../clients/promtail/configuration.md | 104 ++++++++------- .../clients/promtail/installation.md | 5 +- .../clients/promtail/pipelines.md | 33 ++--- .../clients/promtail/scraping.md | 7 +- .../sources/clients/promtail/stages/_index.md | 32 +++++ .../clients/promtail/stages/cri.md | 3 + .../clients/promtail/stages/docker.md | 3 + .../clients/promtail/stages/json.md | 5 +- .../clients/promtail/stages/labels.md | 3 + .../clients/promtail/stages/match.md | 11 +- .../clients/promtail/stages/metrics.md | 11 +- .../clients/promtail/stages/output.md | 3 + .../clients/promtail/stages/regex.md | 3 + .../clients/promtail/stages/replace.md | 13 +- .../clients/promtail/stages/template.md | 19 +-- .../clients/promtail/stages/tenant.md | 5 +- .../clients/promtail/stages/timestamp.md | 3 + .../clients/promtail/troubleshooting.md | 11 +- docs/sources/community/_index.md | 9 ++ docs/{ => sources}/community/contributing.md | 5 +- .../community/getting-in-touch.md | 3 + docs/{ => sources}/community/governance.md | 5 +- .../configuration/_index.md} | 105 ++++++++-------- docs/{ => sources}/configuration/examples.md | 32 +++-- .../configuration/query-frontend.md | 5 +- .../2020-02-Promtail-Push-API.md | 3 + docs/sources/design-documents/_index.md | 4 + docs/{ => sources}/design-documents/labels.md | 41 +++--- docs/sources/getting-started/_index.md | 11 ++ .../getting-started/get-logs-into-loki.md | 15 ++- docs/{ => sources}/getting-started/grafana.md | 3 + docs/{ => sources}/getting-started/labels.md | 7 +- docs/{ => sources}/getting-started/logcli.md | 3 + .../getting-started/troubleshooting.md | 9 +- .../installation/_index.md} | 14 ++- docs/{ => sources}/installation/docker.md | 3 + docs/{ => sources}/installation/helm.md | 7 +- .../installation/install-from-source.md | 3 + docs/{ => sources}/installation/local.md | 7 +- docs/{ => 
sources}/installation/tanka.md | 9 +- docs/{ => sources}/logo.png | Bin docs/{ => sources}/logo_and_name.png | Bin docs/{logql.md => sources/logql/_index.md} | 8 +- docs/sources/maintaining/_index.md | 10 ++ .../maintaining/release-loki-build-image.md | 5 +- docs/{ => sources}/maintaining/release.md | 3 + docs/sources/operations/_index.md | 15 +++ .../operations/authentication.md | 7 +- docs/{ => sources}/operations/canary.png | Bin .../operations/loki-canary-block.png | Bin docs/{ => sources}/operations/loki-canary.md | 23 ++-- .../{ => sources}/operations/multi-tenancy.md | 3 + .../{ => sources}/operations/observability.md | 7 +- docs/{ => sources}/operations/scalability.md | 5 +- .../operations/storage/_index.md} | 15 ++- .../operations/storage/boltdb-shipper.md | 5 +- .../operations/storage/filesystem.md | 9 +- .../operations/storage/retention.md | 15 ++- ...able-manager-active-vs-inactive-tables.png | Bin .../storage/table-manager-periodic-tables.png | Bin .../storage/table-manager-retention.png | Bin .../operations/storage/table-manager.md | 27 ++-- docs/{ => sources}/operations/upgrade.md | 11 +- .../README.md => sources/overview/_index.md} | 10 +- docs/{ => sources}/overview/comparisons.md | 5 +- 101 files changed, 751 insertions(+), 449 deletions(-) create mode 100644 docs/Makefile delete mode 100644 docs/best-practices/README.md delete mode 100644 docs/clients/promtail/stages/README.md delete mode 100644 docs/community/README.md delete mode 100644 docs/getting-started/README.md delete mode 100644 docs/maintaining/README.md delete mode 100644 docs/operations/README.md create mode 100644 docs/sources/.DS_Store create mode 100644 docs/sources/_index.md rename docs/{api.md => sources/api/_index.md} (87%) rename docs/{architecture.md => sources/architecture/_index.md} (98%) rename docs/{ => sources/architecture}/modes_of_operation.png (100%) create mode 100644 docs/sources/best-practices/_index.md rename docs/{ => sources}/best-practices/current-best-practices.md (96%) rename docs/{ => sources}/chunks_diagram.png (100%) rename docs/{clients/README.md => sources/clients/_index.md} (90%) create mode 100644 docs/sources/clients/aws/_index.md rename docs/{clients/aws/ec2/ec2.md => sources/clients/aws/ec2/_index.md} (91%) rename docs/{ => sources}/clients/aws/ec2/promtail-ec2-discovery.png (100%) rename docs/{ => sources}/clients/aws/ec2/promtail-ec2-final.yaml (100%) rename docs/{ => sources}/clients/aws/ec2/promtail-ec2-logs.png (100%) rename docs/{ => sources}/clients/aws/ec2/promtail-ec2.yaml (100%) rename docs/{ => sources}/clients/aws/ec2/promtail.service (100%) rename docs/{clients/aws/ecs/ecs.md => sources/clients/aws/ecs/_index.md} (97%) rename docs/{ => sources}/clients/aws/ecs/ecs-grafana.png (100%) rename docs/{ => sources}/clients/aws/ecs/ecs-role.json (100%) rename docs/{ => sources}/clients/aws/ecs/ecs-task.json (100%) rename docs/{clients/aws/eks/promtail-eks.md => sources/clients/aws/eks/_index.md} (95%) rename docs/{ => sources}/clients/aws/eks/eventrouter.yaml (100%) rename docs/{ => sources}/clients/aws/eks/namespace-grafana.png (100%) rename docs/{ => sources}/clients/aws/eks/values.yaml (100%) rename docs/{clients/docker-driver/README.md => sources/clients/docker-driver/_index.md} (91%) rename docs/{ => sources}/clients/docker-driver/configuration.md (98%) rename docs/{clients/fluentd/README.md => sources/clients/fluentd/_index.md} (98%) rename docs/{clients/promtail/README.md => sources/clients/promtail/_index.md} (90%) rename docs/{ => 
sources}/clients/promtail/configuration.md (93%) rename docs/{ => sources}/clients/promtail/installation.md (98%) rename docs/{ => sources}/clients/promtail/pipelines.md (86%) rename docs/{ => sources}/clients/promtail/scraping.md (99%) create mode 100644 docs/sources/clients/promtail/stages/_index.md rename docs/{ => sources}/clients/promtail/stages/cri.md (98%) rename docs/{ => sources}/clients/promtail/stages/docker.md (97%) rename docs/{ => sources}/clients/promtail/stages/json.md (96%) rename docs/{ => sources}/clients/promtail/stages/labels.md (97%) rename docs/{ => sources}/clients/promtail/stages/match.md (94%) rename docs/{ => sources}/clients/promtail/stages/metrics.md (98%) rename docs/{ => sources}/clients/promtail/stages/output.md (97%) rename docs/{ => sources}/clients/promtail/stages/regex.md (99%) rename docs/{ => sources}/clients/promtail/stages/replace.md (97%) rename docs/{ => sources}/clients/promtail/stages/template.md (93%) rename docs/{ => sources}/clients/promtail/stages/tenant.md (95%) rename docs/{ => sources}/clients/promtail/stages/timestamp.md (99%) rename docs/{ => sources}/clients/promtail/troubleshooting.md (93%) create mode 100644 docs/sources/community/_index.md rename docs/{ => sources}/community/contributing.md (92%) rename docs/{ => sources}/community/getting-in-touch.md (94%) rename docs/{ => sources}/community/governance.md (99%) rename docs/{configuration/README.md => sources/configuration/_index.md} (96%) rename docs/{ => sources}/configuration/examples.md (87%) rename docs/{ => sources}/configuration/query-frontend.md (98%) rename docs/{ => sources}/design-documents/2020-02-Promtail-Push-API.md (99%) create mode 100644 docs/sources/design-documents/_index.md rename docs/{ => sources}/design-documents/labels.md (95%) create mode 100644 docs/sources/getting-started/_index.md rename docs/{ => sources}/getting-started/get-logs-into-loki.md (89%) rename docs/{ => sources}/getting-started/grafana.md (97%) rename docs/{ => sources}/getting-started/labels.md (98%) rename docs/{ => sources}/getting-started/logcli.md (99%) rename docs/{ => sources}/getting-started/troubleshooting.md (95%) rename docs/{installation/README.md => sources/installation/_index.md} (58%) rename docs/{ => sources}/installation/docker.md (99%) rename docs/{ => sources}/installation/helm.md (96%) rename docs/{ => sources}/installation/install-from-source.md (94%) rename docs/{ => sources}/installation/local.md (90%) rename docs/{ => sources}/installation/tanka.md (99%) rename docs/{ => sources}/logo.png (100%) rename docs/{ => sources}/logo_and_name.png (100%) rename docs/{logql.md => sources/logql/_index.md} (99%) create mode 100644 docs/sources/maintaining/_index.md rename docs/{ => sources}/maintaining/release-loki-build-image.md (69%) rename docs/{ => sources}/maintaining/release.md (99%) create mode 100644 docs/sources/operations/_index.md rename docs/{ => sources}/operations/authentication.md (82%) rename docs/{ => sources}/operations/canary.png (100%) rename docs/{ => sources}/operations/loki-canary-block.png (100%) rename docs/{ => sources}/operations/loki-canary.md (97%) rename docs/{ => sources}/operations/multi-tenancy.md (95%) rename docs/{ => sources}/operations/observability.md (97%) rename docs/{ => sources}/operations/scalability.md (76%) rename docs/{operations/storage/README.md => sources/operations/storage/_index.md} (91%) rename docs/{ => sources}/operations/storage/boltdb-shipper.md (99%) rename docs/{ => sources}/operations/storage/filesystem.md (93%) rename 
docs/{ => sources}/operations/storage/retention.md (80%) rename docs/{ => sources}/operations/storage/table-manager-active-vs-inactive-tables.png (100%) rename docs/{ => sources}/operations/storage/table-manager-periodic-tables.png (100%) rename docs/{ => sources}/operations/storage/table-manager-retention.png (100%) rename docs/{ => sources}/operations/storage/table-manager.md (89%) rename docs/{ => sources}/operations/upgrade.md (97%) rename docs/{overview/README.md => sources/overview/_index.md} (97%) rename docs/{ => sources}/overview/comparisons.md (98%) diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000000..2ad333008187 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,11 @@ +IMAGE = grafana/docs-base:latest + +.PHONY: docs +docs: + docker pull ${IMAGE} + docker run -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 --rm $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make server' + +.PHONY: docs-test +docs-test: + docker pull ${IMAGE} + docker run -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 --rm $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make prod' \ No newline at end of file diff --git a/docs/README.md b/docs/README.md index 3829072dad6b..de35bc56d5ee 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,6 +1,6 @@ # Loki Documentation -

<p align="center"><img src="logo_and_name.png" alt="Loki Logo"><br>
+<p align="center"><img src="sources/logo_and_name.png" alt="Loki Logo"><br>
   <small>Like Prometheus, but for logs!</small>
 </p>
 </div>
Grafana Loki is a set of components that can be composed into a fully featured @@ -14,49 +14,49 @@ simplifies the operation and significantly lowers the cost of Loki. ## Table of Contents -1. [Overview](overview/README.md) - 1. [Comparison to other Log Systems](overview/comparisons.md) -2. [Installation](installation/README.md) - 1. [Installing with Tanka](installation/tanka.md) - 2. [Installing with Helm](installation/helm.md) - 3. [Installing with Docker](installation/docker.md) - 4. [Installing locally](installation/local.md) -3. [Getting Started](getting-started/README.md) - 1. [Grafana](getting-started/grafana.md) - 2. [LogCLI](getting-started/logcli.md) - 3. [Labels](getting-started/labels.md) - 4. [Troubleshooting](getting-started/troubleshooting.md) -4. [Best Practices](best-practices/README.md) - 1. [Current Best Practices](best-practices/current-best-practices.md) -5. [Configuration](configuration/README.md) - 1. [Examples](configuration/examples.md) -6. [Clients](clients/README.md) - 1. [Promtail](clients/promtail/README.md) - 1. [Installation](clients/promtail/installation.md) - 2. [Configuration](clients/promtail/configuration.md) - 3. [Scraping](clients/promtail/scraping.md) - 4. [Pipelines](clients/promtail/pipelines.md) - 5. [Troubleshooting](clients/promtail/troubleshooting.md) - 2. [Docker Driver](clients/docker-driver/README.md) - 1. [Configuration](clients/docker-driver/configuration.md) - 4. [Fluent Bit](../cmd/fluent-bit/README.md) - 3. [Fluentd](clients/fluentd/README.md) -7. [LogQL](logql.md) -8. [Operations](operations/README.md) - 1. [Authentication](operations/authentication.md) - 2. [Observability](operations/observability.md) - 3. [Scalability](operations/scalability.md) - 4. [Storage](operations/storage/README.md) - 1. [Table Manager](operations/storage/table-manager.md) - 2. [Retention](operations/storage/retention.md) - 3. [BoltDB Shipper](operations/storage/boltdb-shipper.md) - 5. [Multi-tenancy](operations/multi-tenancy.md) - 6. [Loki Canary](operations/loki-canary.md) -9. [HTTP API](api.md) -10. [Architecture](architecture.md) -11. [Community](community/README.md) - 1. [Governance](community/governance.md) - 2. [Getting in Touch](community/getting-in-touch.md) - 3. [Contributing to Loki](community/contributing.md) -12. [Loki Maintainers Guide](./maintaining/README.md) - 1. [Releasing Loki](./maintaining/release.md) +1. [Overview](sources/overview/_index.md) + 1. [Comparison to other Log Systems](sources/overview/comparisons.md) +2. [Installation](sources/installation/_index.md) + 1. [Installing with Tanka](sources/installation/tanka.md) + 2. [Installing with Helm](sources/installation/helm.md) + 3. [Installing with Docker](sources/installation/docker.md) + 4. [Installing locally](sources/installation/local.md) +3. [Getting Started](sources/getting-started/_index.md) + 1. [Grafana](sources/getting-started/grafana.md) + 2. [LogCLI](sources/getting-started/logcli.md) + 3. [Labels](sources/getting-started/labels.md) + 4. [Troubleshooting](sources/getting-started/troubleshooting.md) +4. [Best Practices](sources/best-practices/_index.md) + 1. [Current Best Practices](sources/best-practices/current-best-practices.md) +5. [Configuration](sources/configuration/_index.md) + 1. [Examples](sources/configuration/examples.md) +6. [Clients](sources/clients/_index.md) + 1. [Promtail](sources/clients/promtail/_index.md) + 1. [Installation](sources/clients/promtail/installation.md) + 2. [Configuration](sources/clients/promtail/configuration.md) + 3. 
[Scraping](sources/clients/promtail/scraping.md) + 4. [Pipelines](sources/clients/promtail/pipelines.md) + 5. [Troubleshooting](sources/clients/promtail/troubleshooting.md) + 2. [Docker Driver](sources/clients/docker-driver/_index.md) + 1. [Configuration](sources/clients/docker-driver/configuration.md) + 3. [Fluent Bit](../cmd/fluent-bit/README.md) + 4. [Fluentd](sources/clients/fluentd/_index.md) +7. [LogQL](sources/logql/_index.md) +8. [Operations](sources/operations/_index.md) + 1. [Authentication](sources/operations/authentication.md) + 2. [Observability](sources/operations/observability.md) + 3. [Scalability](sources/operations/scalability.md) + 4. [Storage](sources/operations/storage/_index.md) + 1. [Table Manager](sources/operations/storage/table-manager.md) + 2. [Retention](sources/operations/storage/retention.md) + 3. [BoltDB Shipper](sources/operations/storage/boltdb-shipper.md) + 5. [Multi-tenancy](sources/operations/multi-tenancy.md) + 6. [Loki Canary](sources/operations/loki-canary.md) +9. [HTTP API](sources/api/_index.md) +10. [Architecture](sources/architecture/_index.md) +11. [Community](sources/community/_index.md) + 1. [Governance](sources/community/governance.md) + 2. [Getting in Touch](sources/community/getting-in-touch.md) + 3. [Contributing to Loki](sources/community/contributing.md) +12. [Loki Maintainers Guide](sources/maintaining/_index.md) + 1. [Releasing Loki](sources/maintaining/release.md) diff --git a/docs/best-practices/README.md b/docs/best-practices/README.md deleted file mode 100644 index 0694451671a8..000000000000 --- a/docs/best-practices/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Best Practices - -1. [Current Best Practices](current-best-practices.md) includes a (hopefully) current guide for some best practices regarding Label usage and configuration in Loki. diff --git a/docs/clients/promtail/stages/README.md b/docs/clients/promtail/stages/README.md deleted file mode 100644 index d7a169a2a329..000000000000 --- a/docs/clients/promtail/stages/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Stages - -This section is a collection of all stages Promtail supports in a -[Pipeline](../pipelines.md). - -Parsing stages: - - * [docker](./docker.md): Extract data by parsing the log line using the standard Docker format. - * [cri](./cri.md): Extract data by parsing the log line using the standard CRI format. - * [regex](./regex.md): Extract data using a regular expression. - * [json](./json.md): Extract data by parsing the log line as JSON. - * [replace](./replace.md): Replace data using a regular expression. - -Transform stages: - - * [template](./template.md): Use Go templates to modify extracted data. - -Action stages: - - * [timestamp](./timestamp.md): Set the timestamp value for the log entry. - * [output](./output.md): Set the log line text. - * [labels](./labels.md): Update the label set for the log entry. - * [metrics](./metrics.md): Calculate metrics based on extracted data. - * [tenant](./tenant.md): Set the tenant ID value to use for the log entry. - -Filtering stages: - - * [match](./match.md): Conditionally run stages based on the label set. - diff --git a/docs/community/README.md b/docs/community/README.md deleted file mode 100644 index 22a780f65ab8..000000000000 --- a/docs/community/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Community - -1. [Governance](./governance.md) -2. [Getting in Touch](./getting-in-touch.md) -3. 
[Contributing](./contributing.md) diff --git a/docs/getting-started/README.md b/docs/getting-started/README.md deleted file mode 100644 index e92e2a2e6696..000000000000 --- a/docs/getting-started/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Getting started with Loki - -1. [Grafana](grafana.md) -2. [LogCLI](logcli.md) -3. [Labels](labels.md) -4. [Troubleshooting](troubleshooting.md) - diff --git a/docs/maintaining/README.md b/docs/maintaining/README.md deleted file mode 100644 index fca08fe4d226..000000000000 --- a/docs/maintaining/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Loki Maintainers Guide - -This section details information for maintainers of Loki. - -1. [Releasing Loki](./release.md) -2. [Releasing `loki-build-image`](./release-loki-build-image.md) diff --git a/docs/operations/README.md b/docs/operations/README.md deleted file mode 100644 index 31d817aa9238..000000000000 --- a/docs/operations/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Operating Loki - -1. [Upgrading](upgrade.md) -2. [Authentication](authentication.md) -3. [Observability](observability.md) -4. [Scalability](scalability.md) -5. [Storage](storage/README.md) - 1. [Table Manager](storage/table-manager.md) - 2. [Retention](storage/retention.md) -6. [Multi-tenancy](multi-tenancy.md) -7. [Loki Canary](loki-canary.md) diff --git a/docs/sources/.DS_Store b/docs/sources/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..9a621a09f14b9745b8a4c7907cc7608c4e6cf65d GIT binary patch literal 10244 zcmeI2&ubGw6vyA#6r-)RAb8%J3Km*X3l?Op@h;>dDA@e64J6t0HzG*z;!!W+!Mi7a z^y)z<9z;C}{so@IKft@+H#6CtX?8bj52En4?7npNz1h$E&ScUzDG{mV?dAeeKtyGn zY-gu&NjdmFpCiW++j0?BA)Xehji}on@skEoKok%KL;+Di6!>=(z-KleyUeEFi*udJ*+JUjSgF~56SQDyjiJ?q5+5_ts zT01b-gp;zxhq6yrwn9<*>F7Vu?xaFvQlS3>elb=&&i!`RH@WE zQ5`MiuhTDQKGr{6bMH2D?)KN*UiId{G@iFX4O*i%ZBYxawrCqIEssFmy^LpM`npVa zF2zha{t0Cfe~(*f?is3sb`!L7&}#!(FnJqSw0i7xCIEY`zsvt=H|8G`ljKk|#>+!5 z<8&0n9dNg(2Y#RY%FTU{AMkKltG8Ya$A82tq09F2c$W6?FggaFh`RXM1`*GR`@Wis z@6)|puH7H6hg>QBb)$qW-_zrx73Mpk`FeB@Y^!>v*7UeX)blu%n#}ihdA}{b8S>`z z?{XK5@c2~#Abg@W^}J>J9_DTnYB!KaH+2Tk^0Oi_ZZ7K&;#|f)%gyLd;=H`fxST{j zEsO-e2YYxur{9~rw(zRb^u~rB-$qtrBHWywe2;S)8&B-{)Ro?n$sl(0`pezUCLs`*&To#>1G+iFawz zJ^bf~{QN1*Kos~d6i}r~wXzIUySF|9 z&3o+u&J~<|u;0K~9fAwHaJ$})$1l7cf7Z6ef3)L%Xxj$H>TtyAaM-`}WkAM`fm^#2 qujJiIrg)TJ*X*+#nKim!cS-%9{FhnCx@FYqzG_nc$F;cLEBFl)oV5J_ literal 0 HcmV?d00001 diff --git a/docs/sources/_index.md b/docs/sources/_index.md new file mode 100644 index 000000000000..d26dc4a4fc69 --- /dev/null +++ b/docs/sources/_index.md @@ -0,0 +1,19 @@ +--- +title: Loki Documentation +aliases: + - /docs/loki/ +--- + +# Loki Documentation + +

+<div align="center">
+<p align="center"><img src="logo_and_name.png" alt="Loki Logo"><br>
+  <small>Like Prometheus, but for logs!</small>
+</p>
+</div>
+ +Grafana Loki is a set of components that can be composed into a fully featured +logging stack. + +Unlike other logging systems, Loki is built around the idea of only indexing +metadata about your logs: labels (just like Prometheus labels). Log data itself +is then compressed and stored in chunks in object stores such as S3 or GCS, or +even locally on the filesystem. A small index and highly compressed chunks +simplifies the operation and significantly lowers the cost of Loki. \ No newline at end of file diff --git a/docs/api.md b/docs/sources/api/_index.md similarity index 87% rename from docs/api.md rename to docs/sources/api/_index.md index fb1a93cb4ba8..3c14e964fce9 100644 --- a/docs/api.md +++ b/docs/sources/api/_index.md @@ -1,29 +1,46 @@ +--- +title: HTTP API +weight: 900 +--- + # Loki's HTTP API Loki exposes an HTTP API for pushing, querying, and tailing log data. -Note that [authenticating](operations/authentication.md) against the API is +Note that [authenticating](../operations/authentication/) against the API is out of scope for Loki. The HTTP API includes the following endpoints: -- [`GET /loki/api/v1/query`](#get-lokiapiv1query) -- [`GET /loki/api/v1/query_range`](#get-lokiapiv1query_range) -- [`GET /loki/api/v1/labels`](#get-lokiapiv1labels) -- [`GET /loki/api/v1/label//values`](#get-lokiapiv1labelnamevalues) -- [`GET /loki/api/v1/tail`](#get-lokiapiv1tail) -- [`GET /loki/api/v1/series`](#series) -- [`POST /loki/api/v1/series`](#series) -- [`POST /loki/api/v1/push`](#post-lokiapiv1push) -- [`GET /api/prom/tail`](#get-apipromtail) -- [`GET /api/prom/query`](#get-apipromquery) -- [`GET /api/prom/label`](#get-apipromlabel) -- [`GET /api/prom/label//values`](#get-apipromlabelnamevalues) -- [`GET /api/prom/series`](#series) -- [`POST /api/prom/series`](#series) -- [`POST /api/prom/push`](#post-apiprompush) -- [`GET /ready`](#get-ready) -- [`POST /flush`](#post-flush) -- [`GET /metrics`](#get-metrics) +- [Loki's HTTP API](#lokis-http-api) + - [Microservices Mode](#microservices-mode) + - [Matrix, Vector, And Streams](#matrix-vector-and-streams) + - [`GET /loki/api/v1/query`](#get-lokiapiv1query) + - [Examples](#examples) + - [`GET /loki/api/v1/query_range`](#get-lokiapiv1query_range) + - [Step vs Interval](#step-vs-interval) + - [Examples](#examples-1) + - [`GET /loki/api/v1/labels`](#get-lokiapiv1labels) + - [Examples](#examples-2) + - [`GET /loki/api/v1/label//values`](#get-lokiapiv1labelnamevalues) + - [Examples](#examples-3) + - [`GET /loki/api/v1/tail`](#get-lokiapiv1tail) + - [`POST /loki/api/v1/push`](#post-lokiapiv1push) + - [Examples](#examples-4) + - [`GET /api/prom/tail`](#get-apipromtail) + - [`GET /api/prom/query`](#get-apipromquery) + - [Examples](#examples-5) + - [`GET /api/prom/label`](#get-apipromlabel) + - [Examples](#examples-6) + - [`GET /api/prom/label//values`](#get-apipromlabelnamevalues) + - [Examples](#examples-7) + - [`POST /api/prom/push`](#post-apiprompush) + - [Examples](#examples-8) + - [`GET /ready`](#get-ready) + - [`POST /flush`](#post-flush) + - [`GET /metrics`](#get-metrics) + - [Series](#series) + - [Examples](#examples-9) + - [Statistics](#statistics) ## Microservices Mode @@ -37,15 +54,36 @@ These endpoints are exposed by all components: These endpoints are exposed by the querier and the frontend: -- [`GET /loki/api/v1/query`](#get-lokiapiv1query) -- [`GET /loki/api/v1/query_range`](#get-lokiapiv1query_range) -- [`GET /loki/api/v1/labels`](#get-lokiapiv1labels) -- [`GET /loki/api/v1/label//values`](#get-lokiapiv1labelnamevalues) -- [`GET 
/loki/api/v1/tail`](#get-lokiapiv1tail) -- [`GET /api/prom/tail`](#get-lokiapipromtail) -- [`GET /api/prom/query`](#get-apipromquery) -- [`GET /api/prom/label`](#get-apipromlabel) -- [`GET /api/prom/label//values`](#get-apipromlabelnamevalues) +- [Loki's HTTP API](#lokis-http-api) + - [Microservices Mode](#microservices-mode) + - [Matrix, Vector, And Streams](#matrix-vector-and-streams) + - [`GET /loki/api/v1/query`](#get-lokiapiv1query) + - [Examples](#examples) + - [`GET /loki/api/v1/query_range`](#get-lokiapiv1query_range) + - [Step vs Interval](#step-vs-interval) + - [Examples](#examples-1) + - [`GET /loki/api/v1/labels`](#get-lokiapiv1labels) + - [Examples](#examples-2) + - [`GET /loki/api/v1/label//values`](#get-lokiapiv1labelnamevalues) + - [Examples](#examples-3) + - [`GET /loki/api/v1/tail`](#get-lokiapiv1tail) + - [`POST /loki/api/v1/push`](#post-lokiapiv1push) + - [Examples](#examples-4) + - [`GET /api/prom/tail`](#get-apipromtail) + - [`GET /api/prom/query`](#get-apipromquery) + - [Examples](#examples-5) + - [`GET /api/prom/label`](#get-apipromlabel) + - [Examples](#examples-6) + - [`GET /api/prom/label//values`](#get-apipromlabelnamevalues) + - [Examples](#examples-7) + - [`POST /api/prom/push`](#post-apiprompush) + - [Examples](#examples-8) + - [`GET /ready`](#get-ready) + - [`POST /flush`](#post-flush) + - [`GET /metrics`](#get-metrics) + - [Series](#series) + - [Examples](#examples-9) + - [Statistics](#statistics) While these endpoints are exposed by just the distributor: @@ -57,7 +95,7 @@ And these endpoints are exposed by just the ingester: The API endpoints starting with `/loki/` are [Prometheus API-compatible](https://prometheus.io/docs/prometheus/latest/querying/api/) and the result formats can be used interchangeably. -A [list of clients](./clients) can be found in the clients documentation. +A [list of clients](../clients) can be found in the clients documentation. ## Matrix, Vector, And Streams @@ -81,7 +119,7 @@ Some Loki API endpoints return a result of a matrix, a vector, or a stream: `/loki/api/v1/query` allows for doing queries against a single point in time. The URL query parameters support the following values: -- `query`: The [LogQL](./logql.md) query to perform +- `query`: The [LogQL](../logql/) query to perform - `limit`: The max number of entries to return - `time`: The evaluation time for the query as a nanosecond Unix epoch. Defaults to now. - `direction`: Determines the sort order of logs. Supported values are `forward` or `backward`. Defaults to `backward.` @@ -213,7 +251,7 @@ $ curl -G -s "http://localhost:3100/loki/api/v1/query" --data-urlencode 'query= `/loki/api/v1/query_range` is used to do a query over a range of time and accepts the following query parameters in the URL: -- `query`: The [LogQL](./logql.md) query to perform +- `query`: The [LogQL](../logql/) query to perform - `limit`: The max number of entries to return - `start`: The start time for the query as a nanosecond Unix epoch. Defaults to one hour ago. - `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now. @@ -444,7 +482,7 @@ $ curl -G -s "http://localhost:3100/loki/api/v1/label/foo/values" | jq `/loki/api/v1/tail` is a WebSocket endpoint that will stream log messages based on a query. It accepts the following query parameters in the URL: -- `query`: The [LogQL](./logql.md) query to perform +- `query`: The [LogQL](../logql/) query to perform - `delay_for`: The number of seconds to delay retrieving logs to let slow loggers catch up. 
Defaults to 0 and cannot be larger than 5. - `limit`: The max number of entries to return @@ -485,8 +523,8 @@ Response (streamed): `/loki/api/v1/push` is the endpoint used to send log entries to Loki. The default behavior is for the POST body to be a snappy-compressed protobuf message: -- [Protobuf definition](/pkg/logproto/logproto.proto) -- [Go client library](/pkg/promtail/client/client.go) +- [Protobuf definition](https://github.com/grafana/loki/tree/master/pkg/logproto/logproto.proto) +- [Go client library](https://github.com/grafana/loki/tree/master/pkg/promtail/client/client.go) Alternatively, if the `Content-Type` header is set to `application/json`, a JSON post body can be sent in the following format: @@ -513,7 +551,7 @@ JSON post body can be sent in the following format: > recent received log, it is rejected with an out of order error. If a log > is received with the same timestamp and content as the most recent log, it is > silently ignored. For more details on the ordering rules, refer to the -> [Loki Overview docs](./overview/README.md#timestamp-ordering). +> [Loki Overview docs](../overview#timestamp-ordering). In microservices mode, `/loki/api/v1/push` is exposed by the distributor. @@ -532,7 +570,7 @@ $ curl -v -H "Content-Type: application/json" -XPOST -s "http://localhost:3100/l `/api/prom/tail` is a WebSocket endpoint that will stream log messages based on a query. It accepts the following query parameters in the URL: -- `query`: The [LogQL](./logql.md) query to perform +- `query`: The [LogQL](../logql/) query to perform - `delay_for`: The number of seconds to delay retrieving logs to let slow loggers catch up. Defaults to 0 and cannot be larger than 5. - `limit`: The max number of entries to return @@ -581,7 +619,7 @@ will be sent over the WebSocket multiple times. `/api/prom/query` supports doing general queries. The URL query parameters support the following values: -- `query`: The [LogQL](./logql.md) query to perform +- `query`: The [LogQL](../logql/) query to perform - `limit`: The max number of entries to return - `start`: The start time for the query as a nanosecond Unix epoch. Defaults to one hour ago. - `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now. @@ -723,8 +761,8 @@ $ curl -G -s "http://localhost:3100/api/prom/label/foo/values" | jq `/api/prom/push` is the endpoint used to send log entries to Loki. The default behavior is for the POST body to be a snappy-compressed protobuf message: -- [Protobuf definition](/pkg/logproto/logproto.proto) -- [Go client library](/pkg/promtail/client/client.go) +- [Protobuf definition](https://github.com/grafana/loki/tree/master/pkg/logproto/logproto.proto) +- [Go client library](https://github.com/grafana/loki/tree/master/pkg/promtail/client/client.go) Alternatively, if the `Content-Type` header is set to `application/json`, a JSON post body can be sent in the following format: @@ -776,7 +814,7 @@ In microservices mode, the `/flush` endpoint is exposed by the ingester. ## `GET /metrics` `/metrics` exposes Prometheus metrics. See -[Observing Loki](operations/observability.md) +[Observing Loki](../operations/observability/) for a list of exported metrics. In microservices mode, the `/metrics` endpoint is exposed by all components. 
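For completeness, the operational endpoints above can be exercised directly; a quick sketch, assuming a Loki instance listening on `localhost:3100` as in the other examples:

```bash
# Check readiness (returns 200 when the component is up).
curl -i http://localhost:3100/ready

# Ask an ingester to flush all in-memory chunks to the backing store.
curl -i -XPOST http://localhost:3100/flush
```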
diff --git a/docs/architecture.md b/docs/sources/architecture/_index.md similarity index 98% rename from docs/architecture.md rename to docs/sources/architecture/_index.md index 3c85ee68a2b9..b1fce256be52 100644 --- a/docs/architecture.md +++ b/docs/sources/architecture/_index.md @@ -1,7 +1,11 @@ +--- +title: Architecture +weight: 1000 +--- # Loki's Architecture This document will expand on the information detailed in the [Loki -Overview](overview/README.md). +Overview](../overview/). ## Multi Tenancy @@ -152,7 +156,7 @@ deduplicated. The ingesters validate timestamps for each log line received maintains a strict ordering. See the [Loki -Overview](./overview/README.md#timestamp-ordering) for detailed documentation on +Overview](../overview#timestamp-ordering) for detailed documentation on the rules of timestamp order. #### Handoff @@ -202,7 +206,7 @@ Caching log (filter, regexp) queries are under active development. ### Querier -The **querier** service handles queries using the [LogQL](./logql.md) query +The **querier** service handles queries using the [LogQL](../logql/) query language, fetching logs both from the ingesters and long-term storage. Queriers query all ingesters for in-memory data before falling back to diff --git a/docs/modes_of_operation.png b/docs/sources/architecture/modes_of_operation.png similarity index 100% rename from docs/modes_of_operation.png rename to docs/sources/architecture/modes_of_operation.png diff --git a/docs/sources/best-practices/_index.md b/docs/sources/best-practices/_index.md new file mode 100644 index 000000000000..6ea685796a10 --- /dev/null +++ b/docs/sources/best-practices/_index.md @@ -0,0 +1,7 @@ +--- +title: Best practices +weight: 400 +--- +# Best Practices + +1. [Current Best Practices](current-best-practices/) includes a (hopefully) current guide for some best practices regarding Label usage and configuration in Loki. diff --git a/docs/best-practices/current-best-practices.md b/docs/sources/best-practices/current-best-practices.md similarity index 96% rename from docs/best-practices/current-best-practices.md rename to docs/sources/best-practices/current-best-practices.md index 1ef6b436085f..091d32afa761 100644 --- a/docs/best-practices/current-best-practices.md +++ b/docs/sources/best-practices/current-best-practices.md @@ -1,3 +1,6 @@ +--- +title: Current best practices +--- # Loki label best practices Loki is under active development, and we are constantly working to improve performance. But here are some of the most current best practices for labels that will give you the best experience with Loki. @@ -32,7 +35,7 @@ Try to keep values bounded to as small a set as possible. We don't have perfect Loki has several client options: [Promtail](https://github.com/grafana/loki/tree/master/docs/clients/promtail) (which also supports systemd journal ingestion and TCP-based syslog ingestion), [Fluentd](https://github.com/grafana/loki/tree/master/fluentd/fluent-plugin-grafana-loki), [Fluent Bit](https://github.com/grafana/loki/tree/master/cmd/fluent-bit), a [Docker plugin](https://grafana.com/blog/2019/07/15/lokis-path-to-ga-docker-logging-driver-plugin-support-for-systemd/), and more! -Each of these come with ways to configure what labels are applied to create log streams. But be aware of what dynamic labels might be applied. Use the Loki series API to get an idea of what your log streams look like and see if there might be ways to reduce streams and cardinality. 
Details of the Series API can be found [here](https://github.com/grafana/loki/blob/master/docs/api.md#series), or you can use [logcli](https://github.com/grafana/loki/blob/master/docs/getting-started/logcli.md) to query Loki for series information. +Each of these come with ways to configure what labels are applied to create log streams. But be aware of what dynamic labels might be applied. Use the Loki series API to get an idea of what your log streams look like and see if there might be ways to reduce streams and cardinality. Details of the Series API can be found [here](https://github.com/grafana/loki/blob/master/docs/api.md#series), or you can use [logcli](https://github.com/grafana/loki/blob/master/docs/getting-started/logcli/) to query Loki for series information. ## 5. Configure caching @@ -69,7 +72,7 @@ What can we do about this? What if this was because the sources of these logs we {job=”syslog”, instance=”host2”} 00:00:02 i’m a syslog! <- Accepted, still in order for stream 2 ``` -But what if the application itself generated logs that were out of order? Well, I'm afraid this is a problem. If you are extracting the timestamp from the log line with something like [the promtail pipeline stage](https://github.com/grafana/loki/blob/master/docs/clients/promtail/stages/timestamp.md), you could instead _not_ do this and let Promtail assign a timestamp to the log lines. Or you can hopefully fix it in the application itself. +But what if the application itself generated logs that were out of order? Well, I'm afraid this is a problem. If you are extracting the timestamp from the log line with something like [the promtail pipeline stage](https://github.com/grafana/loki/blob/master/docs/clients/promtail/stages/timestamp/), you could instead _not_ do this and let Promtail assign a timestamp to the log lines. Or you can hopefully fix it in the application itself. But I want Loki to fix this! Why can’t you buffer streams and re-order them for me?! To be honest, because this would add a lot of memory overhead and complication to Loki, and as has been a common thread in this post, we want Loki to be simple and cost-effective. Ideally we would want to improve our clients to do some basic buffering and sorting as this seems a better place to solve this problem. @@ -91,6 +94,6 @@ If you have an application that can log fast enough to fill these chunks quickly Starting in version 1.6.0 Loki and Promtail have flags which will dump the entire config object to stderr, or the log file, when they start. -`-print-config-stderr` is nice when running loki directly e.g. `./loki ` as you can get a quick output of the entire Loki config. +`-print-config-stderr` is nice when running loki directly e.g. `./loki ` as you can get a quick output of the entire Loki config. `-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so that the order of configs reads correctly top to bottom when viewed in Grafana's Explore. 
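As a concrete illustration of the flags above, a minimal sketch (the binary name and config file path are assumptions):

```bash
# Dump the entire parsed config object to stderr at startup,
# then continue running normally.
./loki -config.file=loki-local-config.yaml -print-config-stderr
```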
\ No newline at end of file diff --git a/docs/chunks_diagram.png b/docs/sources/chunks_diagram.png similarity index 100% rename from docs/chunks_diagram.png rename to docs/sources/chunks_diagram.png diff --git a/docs/clients/README.md b/docs/sources/clients/_index.md similarity index 90% rename from docs/clients/README.md rename to docs/sources/clients/_index.md index 900586c137f2..5d43ce76cb20 100644 --- a/docs/clients/README.md +++ b/docs/sources/clients/_index.md @@ -1,11 +1,15 @@ +--- +title: Clients +weight: 600 +--- # Loki clients Loki supports the following official clients for sending logs: -- [Promtail](./promtail/README.md) -- [Docker Driver](./docker-driver/README.md) -- [Fluentd](./fluentd/README.md) -- [Fluent Bit](../../cmd/fluent-bit/README.md) +- [Promtail](promtail/) +- [Docker Driver](docker-driver/) +- [Fluentd](fluentd/) +- [Fluent Bit](https://github.com/grafana/loki/blob/master/cmd/fluent-bit/README.md) ## Picking a client diff --git a/docs/sources/clients/aws/_index.md b/docs/sources/clients/aws/_index.md new file mode 100644 index 000000000000..287b80355f51 --- /dev/null +++ b/docs/sources/clients/aws/_index.md @@ -0,0 +1,3 @@ +--- +title: AWS +--- \ No newline at end of file diff --git a/docs/clients/aws/ec2/ec2.md b/docs/sources/clients/aws/ec2/_index.md similarity index 91% rename from docs/clients/aws/ec2/ec2.md rename to docs/sources/clients/aws/ec2/_index.md index 5bf94a827491..ab195e481f75 100644 --- a/docs/clients/aws/ec2/ec2.md +++ b/docs/sources/clients/aws/ec2/_index.md @@ -1,15 +1,18 @@ +--- +title: ec2 +--- # Running Promtail on AWS EC2 -In this tutorial we're going to setup [Promtail][promtail] on an AWS EC2 instance and configure it to sends all its logs to a Loki instance. +In this tutorial we're going to setup [Promtail](../../promtail/) on an AWS EC2 instance and configure it to sends all its logs to a Loki instance. - [Running Promtail on AWS EC2](#running-promtail-on-aws-ec2) - - [Requirements](#requirements) - - [Creating an EC2 instance](#creating-an-ec2-instance) - - [Setting up Promtail](#setting-up-promtail) - - [Configuring Promtail as a service](#configuring-promtail-as-a-service) - - [Sending systemd logs](#sending-systemd-logs) + - [Requirements](#requirements) + - [Creating an EC2 instance](#creating-an-ec2-instance) + - [Setting up Promtail](#setting-up-promtail) + - [Configuring Promtail as a service](#configuring-promtail-as-a-service) + - [Sending systemd logs](#sending-systemd-logs) @@ -44,7 +47,7 @@ aws ec2 create-security-group --group-name promtail-ec2 --description "promtail } ``` -Now let's authorize inbound access for SSH and [Promtail][promtail] server: +Now let's authorize inbound access for SSH and [Promtail](../../promtail/) server: ```bash aws ec2 authorize-security-group-ingress --group-id sg-02c489bbdeffdca1d --protocol tcp --port 22 --cidr 0.0.0.0/0 @@ -84,7 +87,7 @@ ssh ec2-user@ec2-13-59-62-37.us-east-2.compute.amazonaws.com ## Setting up Promtail First let's make sure we're running as root by using `sudo -s`. -Next we'll download, install and give executable right to [Promtail][promtail]. +Next we'll download, install and give executable right to [Promtail](../../promtail/). ```bash mkdir /opt/promtail && cd /opt/promtail @@ -93,7 +96,7 @@ unzip "promtail-linux-amd64.zip" chmod a+x "promtail-linux-amd64" ``` -Now we're going to download the [promtail configuration][promtail configuration] file below and edit it, don't worry we will explain what those means. 
+Now we're going to download the [promtail configuration](../../promtail/) file below and edit it, don't worry we will explain what those means. The file is also available on [github][config gist]. ```bash @@ -136,11 +139,11 @@ scrape_configs: target_label: __host__ ``` -The **server** section indicates promtail to bind his http server to 3100. Promtail serves HTTP pages for [troubleshooting][troubleshooting loki] service discovery and targets. +The **server** section indicates promtail to bind his http server to 3100. Promtail serves HTTP pages for [troubleshooting](../../promtail/troubleshooting) service discovery and targets. The **clients** section allow you to target your loki instance, if you're using GrafanaCloud simply replace `` and `` with your credentials. Otherwise just replace the whole URL with your custom Loki instance.(e.g `http://my-loki-instance.my-org.com/loki/api/v1/push`) -[Promtail][promtail] uses the same [Prometheus **scrape_configs**][prometheus scrape config]. This means if you already own a Prometheus instance the config will be very similar and easy to grasp. +[Promtail](../../promtail/) uses the same [Prometheus **scrape_configs**][prometheus scrape config]. This means if you already own a Prometheus instance the config will be very similar and easy to grasp. Since we're running on AWS EC2 we want to uses EC2 service discovery, this will allows us to scrape metadata about the current instance (and even your custom tags) and attach those to our logs. This way managing and querying on logs will be much easier. @@ -231,7 +234,7 @@ Jul 08 15:48:57 ip-172-31-45-69.us-east-2.compute.internal promtail-linux-amd64[ Jul 08 15:48:57 ip-172-31-45-69.us-east-2.compute.internal promtail-linux-amd64[2732]: level=info ts=2020-07-08T15:48:57.56029474Z caller=main.go:67 msg="Starting Promtail" version="(version=1.5.0, branch=HEAD, revision=12c7eab8)" ``` -You can now verify in Grafana that Loki has correctly received your instance logs using by using the [LogQL][logql] query `{zone="us-east-2"}`. +You can now verify in Grafana that Loki has correctly received your instance logs by using the [LogQL](../../../logql/) query `{zone="us-east-2"}`. ![Grafana Loki logs][ec2 logs] @@ -260,7 +263,7 @@ Note that you can use [relabeling][relabeling] to convert systemd labels to matc That's it, save the config and you can `reboot` the machine (or simply restart the service `systemctl restart promtail.service`). -Let's head back to Grafana and verify that your Promtail logs are available in Grafana by using the [LogQL][logql] query `{unit="promtail.service"}` in Explore. Finally make sure to checkout [live tailing][live tailing] to see logs appearing as they are ingested in Loki. +Let's head back to Grafana and verify that your Promtail logs are available in Grafana by using the [LogQL](../../../logql/) query `{unit="promtail.service"}` in Explore. Finally make sure to checkout [live tailing][live tailing] to see logs appearing as they are ingested in Loki. 
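If you prefer the command line over Grafana for a quick check, the same query can be run with logcli; a sketch, assuming logcli is installed and the Loki address below is yours:

```bash
# Point logcli at the Loki instance, then fetch recent entries for the unit.
export LOKI_ADDR=http://localhost:3100
logcli query '{unit="promtail.service"}'
```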
[promtail]: ../../promtail/ [aws cli]: https://aws.amazon.com/cli/ diff --git a/docs/clients/aws/ec2/promtail-ec2-discovery.png b/docs/sources/clients/aws/ec2/promtail-ec2-discovery.png similarity index 100% rename from docs/clients/aws/ec2/promtail-ec2-discovery.png rename to docs/sources/clients/aws/ec2/promtail-ec2-discovery.png diff --git a/docs/clients/aws/ec2/promtail-ec2-final.yaml b/docs/sources/clients/aws/ec2/promtail-ec2-final.yaml similarity index 100% rename from docs/clients/aws/ec2/promtail-ec2-final.yaml rename to docs/sources/clients/aws/ec2/promtail-ec2-final.yaml diff --git a/docs/clients/aws/ec2/promtail-ec2-logs.png b/docs/sources/clients/aws/ec2/promtail-ec2-logs.png similarity index 100% rename from docs/clients/aws/ec2/promtail-ec2-logs.png rename to docs/sources/clients/aws/ec2/promtail-ec2-logs.png diff --git a/docs/clients/aws/ec2/promtail-ec2.yaml b/docs/sources/clients/aws/ec2/promtail-ec2.yaml similarity index 100% rename from docs/clients/aws/ec2/promtail-ec2.yaml rename to docs/sources/clients/aws/ec2/promtail-ec2.yaml diff --git a/docs/clients/aws/ec2/promtail.service b/docs/sources/clients/aws/ec2/promtail.service similarity index 100% rename from docs/clients/aws/ec2/promtail.service rename to docs/sources/clients/aws/ec2/promtail.service diff --git a/docs/clients/aws/ecs/ecs.md b/docs/sources/clients/aws/ecs/_index.md similarity index 97% rename from docs/clients/aws/ecs/ecs.md rename to docs/sources/clients/aws/ecs/_index.md index 24e1990ac235..ec341ecd9a87 100644 --- a/docs/clients/aws/ecs/ecs.md +++ b/docs/sources/clients/aws/ecs/_index.md @@ -1,3 +1,6 @@ +--- +title: ecs +--- # Sending Logs From AWS Elastic Container Service (ECS) [ECS][ECS] is the fully managed container orchestration service by Amazon. Combined with [Fargate][Fargate] you can run your container workload without the need to provision your own compute resources. In this tutorial we will see how you can leverage [Firelens][Firelens], an AWS log router, to forward all your logs and your workload metadata to a Loki instance. 
@@ -7,10 +10,10 @@ After this tutorial you will able to query all your logs in one place using Graf - [Sending Logs From AWS Elastic Container Service (ECS)](#sending-logs-from-aws-elastic-container-service-ecs) - - [Requirements](#requirements) - - [Setting up the ECS cluster](#setting-up-the-ecs-cluster) - - [Creating your task definition](#creating-your-task-definition) - - [Running your service](#running-your-service) + - [Requirements](#requirements) + - [Setting up the ECS cluster](#setting-up-the-ecs-cluster) + - [Creating your task definition](#creating-your-task-definition) + - [Running your service](#running-your-service) diff --git a/docs/clients/aws/ecs/ecs-grafana.png b/docs/sources/clients/aws/ecs/ecs-grafana.png similarity index 100% rename from docs/clients/aws/ecs/ecs-grafana.png rename to docs/sources/clients/aws/ecs/ecs-grafana.png diff --git a/docs/clients/aws/ecs/ecs-role.json b/docs/sources/clients/aws/ecs/ecs-role.json similarity index 100% rename from docs/clients/aws/ecs/ecs-role.json rename to docs/sources/clients/aws/ecs/ecs-role.json diff --git a/docs/clients/aws/ecs/ecs-task.json b/docs/sources/clients/aws/ecs/ecs-task.json similarity index 100% rename from docs/clients/aws/ecs/ecs-task.json rename to docs/sources/clients/aws/ecs/ecs-task.json diff --git a/docs/clients/aws/eks/promtail-eks.md b/docs/sources/clients/aws/eks/_index.md similarity index 95% rename from docs/clients/aws/eks/promtail-eks.md rename to docs/sources/clients/aws/eks/_index.md index b21d8443c8a9..04af82d35be2 100644 --- a/docs/clients/aws/eks/promtail-eks.md +++ b/docs/sources/clients/aws/eks/_index.md @@ -1,4 +1,6 @@ - +--- +title: promtail eks +--- # Sending logs from EKS with Promtail In this tutorial we'll see how to set up Promtail on [EKS][eks]. Amazon Elastic Kubernetes Service (Amazon [EKS][eks]) is a fully managed Kubernetes service; using Promtail we'll get full visibility into our cluster logs. We'll start by forwarding pod logs, then node services, and finally Kubernetes events. @@ -8,12 +10,12 @@ After this tutorial you will able to query all your logs in one place using Graf - [Sending logs from EKS with Promtail](#sending-logs-from-eks-with-promtail) - - [Requirements](#requirements) - - [Setting up the cluster](#setting-up-the-cluster) - - [Adding Promtail DaemonSet](#adding-promtail-daemonset) - - [Fetching kubelet logs with systemd](#fetching-kubelet-logs-with-systemd) - - [Adding Kubernetes events](#adding-kubernetes-events) - - [Conclusion](#conclusion) + - [Requirements](#requirements) + - [Setting up the cluster](#setting-up-the-cluster) + - [Adding Promtail DaemonSet](#adding-promtail-daemonset) + - [Fetching kubelet logs with systemd](#fetching-kubelet-logs-with-systemd) + - [Adding Kubernetes events](#adding-kubernetes-events) + - [Conclusion](#conclusion) @@ -49,7 +51,7 @@ Server Version: version.Info{Major:"1", Minor:"16+", GitVersion:"v1.16.8-eks-fd1 ## Adding Promtail DaemonSet -To ship all your pods logs we're going to setup [Promtail][Promtail] as a DaemonSet in our cluster. This means it will run on each nodes of the cluster, we will then configure it to find the logs of your containers on the host. +To ship all your pod logs we're going to set up [Promtail](../../promtail/) as a DaemonSet in our cluster. This means it will run on each node of the cluster; we will then configure it to find the logs of your containers on the host. 
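A minimal sketch of that DaemonSet installation with Helm (the chart repository URL and values are assumptions based on the Loki charts published at the time):

```bash
# Add the Loki chart repository and install Promtail as a DaemonSet,
# pointing it at a Loki service named "loki" in the cluster.
helm repo add loki https://grafana.github.io/loki/charts
helm repo update
helm upgrade --install promtail loki/promtail --set "loki.serviceName=loki"
```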
What's nice about Promtail is that it uses the same [service discovery as Prometheus][prometheus conf]; you should make sure the `scrape_configs` of Promtail matches the Prometheus one. Not only is this simpler to configure, but this also means Metrics and Logs will have the same metadata (labels) attached by the Prometheus service discovery. When querying Grafana you will be able to correlate metrics and logs very quickly, you can read more about this on our [blogpost][correlate]. diff --git a/docs/clients/aws/eks/eventrouter.yaml b/docs/sources/clients/aws/eks/eventrouter.yaml similarity index 100% rename from docs/clients/aws/eks/eventrouter.yaml rename to docs/sources/clients/aws/eks/eventrouter.yaml diff --git a/docs/clients/aws/eks/namespace-grafana.png b/docs/sources/clients/aws/eks/namespace-grafana.png similarity index 100% rename from docs/clients/aws/eks/namespace-grafana.png rename to docs/sources/clients/aws/eks/namespace-grafana.png diff --git a/docs/clients/aws/eks/values.yaml b/docs/sources/clients/aws/eks/values.yaml similarity index 100% rename from docs/clients/aws/eks/values.yaml rename to docs/sources/clients/aws/eks/values.yaml diff --git a/docs/clients/docker-driver/README.md b/docs/sources/clients/docker-driver/_index.md similarity index 91% rename from docs/clients/docker-driver/README.md rename to docs/sources/clients/docker-driver/_index.md index eeef508818c6..bdf810255d51 100644 --- a/docs/clients/docker-driver/README.md +++ b/docs/sources/clients/docker-driver/_index.md @@ -1,3 +1,6 @@ +--- +title: Docker driver +--- # Docker Driver Client Loki officially supports a Docker plugin that will read logs from Docker @@ -8,7 +11,7 @@ to a private Loki instance or [Grafana Cloud](https://grafana.com/oss/loki). > [Docker docs](https://docs.docker.com/engine/extend) for more information. Documentation on configuring the Loki Docker Driver can be found on the -[configuration page](./configuration.md). +[configuration page](configuration/). ## Installing @@ -30,7 +33,7 @@ ID NAME DESCRIPTION ENABLED ac720b8fcfdb loki Loki Logging Driver true ``` -Once the plugin is installed it can be [configured](./configuration.md). +Once the plugin is installed it can be [configured](configuration/). ## Upgrading @@ -54,5 +57,5 @@ docker plugin rm loki ## Amazon ECS The Docker driver is not currently supported on [Amazon ECS](https://aws.amazon.com/ecs/), although you can work around this if you are using EC2 based ECS (as opposed to Fargate based ECS). -The solution suggested in the [LogConfiguration Documentation](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) is to fork the ECS agent and modify it to work with your log driver of choice. +The solution suggested in the [LogConfiguration Documentation](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) is to fork the ECS agent and modify it to work with your log driver of choice. The other option is to configure the Loki Docker driver as the default Docker logging driver, and then specify no logging configuration within the ECS task. 
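To round out the overview, here is a hedged sketch of attaching the driver to a single container (the Loki endpoint and the image are placeholders; the options come from the configuration reference that follows):

```bash
# Run any container with its stdout/stderr shipped to Loki by the plugin.
docker run --log-driver=loki \
    --log-opt loki-url="http://localhost:3100/loki/api/v1/push" \
    --log-opt loki-retries=5 \
    grafana/grafana
```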
diff --git a/docs/clients/docker-driver/configuration.md b/docs/sources/clients/docker-driver/configuration.md similarity index 98% rename from docs/clients/docker-driver/configuration.md rename to docs/sources/clients/docker-driver/configuration.md index e106c4d59efc..ef4a04903799 100644 --- a/docs/clients/docker-driver/configuration.md +++ b/docs/sources/clients/docker-driver/configuration.md @@ -1,3 +1,6 @@ +--- +title: Configuration +--- # Configuring the Docker Driver The Docker daemon on each machine has a default logging driver and @@ -123,7 +126,7 @@ The following are all supported options that the Loki logging driver supports: | `loki-min-backoff` | No | `100ms` | The minimum amount of time to wait before retrying a batch. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | `loki-max-backoff` | No | `10s` | The maximum amount of time to wait before retrying a batch. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | `loki-retries` | No | `10` | The maximum amount of retries for a log batch. -| `loki-pipeline-stage-file` | No | | The location of a pipeline stage configuration file. Pipeline stages allows to parse log lines to extract more labels. [See the Promtail documentation for more info.](../promtail/pipelines.md) +| `loki-pipeline-stage-file` | No | | The location of a pipeline stage configuration file. Pipeline stages allow parsing log lines to extract more labels. [See the Promtail documentation for more info.](../../promtail/pipelines/) | `loki-tls-ca-file` | No | | Set the path to a custom certificate authority. | `loki-tls-cert-file` | No | | Set the path to a client certificate file. | `loki-tls-key-file` | No | | Set the path to a client key. diff --git a/docs/clients/fluentd/README.md b/docs/sources/clients/fluentd/_index.md similarity index 98% rename from docs/clients/fluentd/README.md rename to docs/sources/clients/fluentd/_index.md index d192653fefb4..b8114bdb5f8a 100644 --- a/docs/clients/fluentd/README.md +++ b/docs/sources/clients/fluentd/_index.md @@ -1,3 +1,6 @@ +--- +title: Fluentd +--- # Fluentd Loki has a [Fluentd](https://www.fluentd.org/) output plugin called @@ -123,7 +126,7 @@ There is a Docker image `grafana/fluent-plugin-loki:master` which contains [defa This image also uses `LOKI_URL`, `LOKI_USERNAME`, and `LOKI_PASSWORD` environment variables to specify the Loki endpoint, user, and password (you can leave the USERNAME and PASSWORD blank if they're not used). -This image will start an instance of Fluentd to forward incoming logs to the specified Loki url. As an alternate, containerized applications can also use [docker driver plugin](../docker-driver/README.md) to ship logs without needing Fluentd. +This image will start an instance of Fluentd to forward incoming logs to the specified Loki url. As an alternative, containerized applications can also use the [docker driver plugin](../docker-driver/) to ship logs without needing Fluentd. ### Example @@ -163,7 +166,7 @@ services: ## Configuration ### url The url of the Loki server to send logs to. When sending data the publish path (`/api/prom/push`) will automatically be appended. By default the url is set to `https://logs-us-west1.grafana.net`, the url of the Grafana Labs preview [hosted Loki](https://grafana.com/loki) service. 
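As a sketch of the containerized setup described above (the port mapping and local endpoint are assumptions; the environment variable names are the ones listed earlier):

```bash
# Start the bundled Fluentd image and point it at a local Loki instance;
# leave LOKI_USERNAME/LOKI_PASSWORD empty if authentication is not used.
docker run -d \
    -e LOKI_URL="http://localhost:3100" \
    -e LOKI_USERNAME="" \
    -e LOKI_PASSWORD="" \
    -p 24224:24224 \
    grafana/fluent-plugin-loki:master
```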
#### Proxy Support @@ -176,7 +179,7 @@ If using Grafana Labs' hosted Loki, the username needs to be set to your inst ### tenant Loki is a multi-tenant log storage platform and all requests sent must include a tenant. For some installations the tenant will be set automatically by an authenticating proxy. Otherwise you can define a tenant to be passed through. -The tenant can be any string value. +The tenant can be any string value. The tenant field also supports placeholders, so it can dynamically change based on tag and record fields. Each placeholder must be added as a buffer chunk key. The following is an example of setting the tenant based on a k8s pod label: diff --git a/docs/clients/promtail/README.md b/docs/sources/clients/promtail/_index.md similarity index 90% rename from docs/clients/promtail/README.md rename to docs/sources/clients/promtail/_index.md index 19f72532e68d..b541c419754c 100644 --- a/docs/clients/promtail/README.md +++ b/docs/sources/clients/promtail/_index.md @@ -1,3 +1,6 @@ +--- +title: Promtail +--- # Promtail Promtail is an agent which ships the contents of local logs to a private Loki @@ -30,11 +33,11 @@ Kubernetes API server while `static` usually covers all other use cases. Just like Prometheus, `promtail` is configured using a `scrape_configs` stanza. `relabel_configs` allows for fine-grained control of what to ingest, what to drop, and the final metadata to attach to the log line. Refer to the docs for -[configuring Promtail](configuration.md) for more details. +[configuring Promtail](configuration/) for more details. ## Loki Push API -Promtail can also be configured to receive logs from another Promtail or any Loki client by exposing the [Loki Push API](../../api.md#post-lokiapiv1push) with the [loki_push_api](./configuration.md#loki_push_api_config) scrape config. +Promtail can also be configured to receive logs from another Promtail or any Loki client by exposing the [Loki Push API](../../api#post-lokiapiv1push) with the [loki_push_api](configuration#loki_push_api_config) scrape config. There are a few instances where this might be helpful: @@ -44,12 +47,12 @@ There are a few instances where this might be helpful: ## Receiving logs from Syslog -When the [Syslog Target](./configuration.md#syslog_config) is being used, logs +When the [Syslog Target](configuration#syslog_config) is being used, logs can be written with the syslog protocol to the configured port. ## AWS -If you need to run Promtail on Amazon Web Services EC2 instances, you can use our [detailed tutorial](../aws/ec2/ec2.md). +If you need to run Promtail on Amazon Web Services EC2 instances, you can use our [detailed tutorial](../aws/ec2/). ## Labeling and parsing @@ -62,7 +65,7 @@ To allow more sophisticated filtering afterwards, Promtail allows you to set labels not only from service discovery, but also based on the contents of each log line. The `pipeline_stages` can be used to add or update labels, correct the timestamp, or re-write log lines entirely. Refer to the documentation for -[pipelines](pipelines.md) for more details. +[pipelines](pipelines/) for more details. ## Shipping @@ -88,7 +91,7 @@ This endpoint returns 200 when Promtail is up and running, and there's at least ### `GET /metrics` This endpoint returns Promtail metrics for Prometheus. See -"[Operations > Observability](../../operations/observability.md)" to get a list +"[Operations > Observability](../../operations/observability/)" to get a list of exported metrics.
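Since `/metrics` uses plain Prometheus exposition, scraping it needs nothing special. Here is a minimal sketch of a Prometheus scrape job for it, assuming Promtail is listening on its default HTTP port of 9080 (the job name and target address are illustrative assumptions):

```yaml
# Sketch only: scrape Promtail's own /metrics endpoint with Prometheus.
# Assumes Promtail's default http_listen_port of 9080.
scrape_configs:
  - job_name: promtail
    static_configs:
      - targets: ["localhost:9080"]
```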
### Promtail web server config diff --git a/docs/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md similarity index 93% rename from docs/clients/promtail/configuration.md rename to docs/sources/clients/promtail/configuration.md index 853a73cade8a..2b7848d3ef6e 100644 --- a/docs/clients/promtail/configuration.md +++ b/docs/sources/clients/promtail/configuration.md @@ -1,42 +1,54 @@ +--- +title: Configuration +--- # Configuring Promtail Promtail is configured in a YAML file (usually referred to as `config.yaml`) which contains information on the Promtail server, where positions are stored, and how to scrape logs from files. -* [Printing Promtail Config At Runtime](#printing-promtail-config-at-runtime) -* [Configuration File Reference](#configuration-file-reference) -* [server_config](#server_config) -* [client_config](#client_config) -* [position_config](#position_config) -* [scrape_config](#scrape_config) - * [pipeline_stages](#pipeline_stages) - * [docker](#docker) - * [cri](#cri) - * [regex](#regex) - * [json](#json) - * [template](#template) - * [match](#match) - * [timestamp](#timestamp) - * [output](#output) - * [labels](#labels) - * [metrics](#metrics) - * [counter](#counter) - * [gauge](#gauge) - * [histogram](#histogram) - * [tenant](#tenant) - * [journal_config](#journal_config) - * [syslog_config](#syslog_config) - * [loki_push_api_config](#loki_push_api_config) - * [relabel_config](#relabel_config) - * [static_config](#static_config) - * [file_sd_config](#file_sd_config) - * [kubernetes_sd_config](#kubernetes_sd_config) -* [target_config](#target_config) -* [Example Docker Config](#example-docker-config) -* [Example Static Config](#example-static-config) -* [Example Journal Config](#example-journal-config) -* [Example Syslog Config](#example-syslog-config) +- [Configuring Promtail](#configuring-promtail) - [Printing Promtail Config At Runtime](#printing-promtail-config-at-runtime) - [Configuration File Reference](#configuration-file-reference) - [server_config](#server_config) - [client_config](#client_config) - [position_config](#position_config) - [scrape_config](#scrape_config) - [pipeline_stages](#pipeline_stages) - [docker](#docker) - [cri](#cri) - [regex](#regex) - [json](#json) - [template](#template) - [match](#match) - [timestamp](#timestamp) - [output](#output) - [labels](#labels) - [metrics](#metrics) - [counter](#counter) - [gauge](#gauge) - [histogram](#histogram) - [tenant](#tenant) - [journal_config](#journal_config) - [syslog_config](#syslog_config) - [Available Labels](#available-labels) - [loki_push_api_config](#loki_push_api_config) - [relabel_config](#relabel_config) - [static_config](#static_config) - [file_sd_config](#file_sd_config) - [kubernetes_sd_config](#kubernetes_sd_config) - [`node`](#node) - [`service`](#service) - [`pod`](#pod) - [`endpoints`](#endpoints) - [`ingress`](#ingress) - [target_config](#target_config) - [Example Docker Config](#example-docker-config) - [Example Static Config](#example-static-config) - [Example Static Config without targets](#example-static-config-without-targets) - [Example Journal Config](#example-journal-config) - [Example Syslog Config](#example-syslog-config) - [Example Push Config](#example-push-config) ## Printing Promtail Config At Runtime @@ -51,9 +63,9 @@ Some values may not be relevant to your install, this is expected as every option has a default value if it is being used or not. This config is what Promtail will use to run, it can be
invaluable for debugging issues related to configuration and is especially useful in making sure your config files and flags are being read and loaded properly. -`-print-config-stderr` is nice when running Promtail directly e.g. `./promtail ` as you can get a quick output of the entire Promtail config. +`-print-config-stderr` is nice when running Promtail directly e.g. `./promtail ` as you can get a quick output of the entire Promtail config. -`-log-config-reverse-order` is the flag we run Promtail with in all our environments, the config entries are reversed so +`-log-config-reverse-order` is the flag we run Promtail with in all our environments, the config entries are reversed so that the order of configs reads correctly top to bottom when viewed in Grafana's Explore. @@ -65,8 +77,8 @@ defined by the schema below. Brackets indicate that a parameter is optional. For non-list parameters the value is set to the specified default. For more detailed information on configuring how to discover and scrape logs from -targets, see [Scraping](scraping.md). For more information on transforming logs -from scraped targets, see [Pipelines](pipelines.md). +targets, see [Scraping](../scraping/). For more information on transforming logs +from scraped targets, see [Pipelines](../pipelines/). Generic placeholders are defined as follows: @@ -315,10 +327,10 @@ kubernetes_sd_configs: ### pipeline_stages -The [pipeline](./pipelines.md) stages (`pipeline_stages`) is used to transform +The [pipeline](../pipelines/) stages (`pipeline_stages`) is used to transform log entries and their labels after discovery and consists of a list of any of the items listed below. -Stages serve several purposes, more detail can be found [here](./pipelines.md), however generally you extract data with `regex` or `json` stages into a temporary map which can then be used as `labels` or `output` or any of the other stages aside from `docker` and `cri` which are explained in more detail below. +Stages serve several purposes, more detail can be found [here](../pipelines/), however generally you extract data with `regex` or `json` stages into a temporary map which can then be used as `labels` or `output` or any of the other stages aside from `docker` and `cri` which are explained in more detail below. ```yaml - [ ] ``` #### match The match stage conditionally executes a set of stages when a log entry matches -a configurable [LogQL](../../logql.md) stream selector. +a configurable [LogQL](../../../logql/) stream selector. ```yaml match: @@ -704,8 +716,8 @@ promtail needs to wait for the next message to catch multi-line messages, therefore delays between messages can occur. See recommended output configurations for -[syslog-ng](scraping.md#syslog-ng-output-configuration) and -[rsyslog](scraping.md#rsyslog-output-configuration). Both configurations enable +[syslog-ng](../scraping#syslog-ng-output-configuration) and +[rsyslog](../scraping#rsyslog-output-configuration). Both configurations enable IETF Syslog with octet-counting. You may need to increase the open files limit for the promtail process @@ -742,7 +754,7 @@ labels: ### loki_push_api_config -The `loki_push_api_config` block configures Promtail to expose a [Loki push API](../../api.md#post-lokiapiv1push) server. +The `loki_push_api_config` block configures Promtail to expose a [Loki push API](../../../api#post-lokiapiv1push) server. Each job configured with a `loki_push_api_config` will expose this API and will require a separate port.
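As an illustration of the separate-port requirement, a scrape config exposing the push API might look like the sketch below; the job name, ports, and label are arbitrary assumptions chosen so they do not collide with Promtail's own server ports:

```yaml
# Sketch only: expose the Loki push API from a Promtail scrape job.
scrape_configs:
  - job_name: push1
    loki_push_api:
      server:
        http_listen_port: 3500   # must differ from Promtail's own HTTP port
        grpc_listen_port: 3600   # must differ from Promtail's own gRPC port
      labels:
        pushserver: push1        # attached to every entry received on this port
```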
@@ -849,7 +861,7 @@ for them. It is the canonical way to specify static targets in a scrape configuration. ```yaml -# Configures the discovery to look on the current machine. +# Configures the discovery to look on the current machine. # This is required by the prometheus service discovery code but doesn't # really apply to Promtail which can ONLY look at files on the local machine # As such it should only have the value of localhost, OR it can be excluded @@ -1100,9 +1112,9 @@ sync_period: "10s" ## Example Docker Config -It's fairly difficult to tail Docker files on a standalone machine because they are in different locations for every OS. We recommend the [Docker logging driver](../../../cmd/docker-driver/README.md) for local Docker installs or Docker Compose. +It's fairly difficult to tail Docker files on a standalone machine because they are in different locations for every OS. We recommend the [Docker logging driver](../../docker-driver/) for local Docker installs or Docker Compose. -If running in a Kubernetes environment, you should look at the defined configs which are in [helm](../../../production/helm/promtail/templates/configmap.yaml) and [jsonnet](../../../production/ksonnet/promtail/scrape_config.libsonnet), these leverage the prometheus service discovery libraries (and give promtail it's name) for automatically finding and tailing pods. The jsonnet config explains with comments what each section is for. +If running in a Kubernetes environment, you should look at the defined configs which are in [helm](https://github.com/grafana/loki/tree/master/production/helm/promtail/templates/configmap.yaml) and [jsonnet](https://github.com/grafana/loki/tree/master/production/ksonnet/promtail/scrape_config.libsonnet); these leverage the prometheus service discovery libraries (and give promtail its name) for automatically finding and tailing pods. The jsonnet config explains with comments what each section is for. ## Example Static Config diff --git a/docs/clients/promtail/installation.md b/docs/sources/clients/promtail/installation.md similarity index 98% rename from docs/clients/promtail/installation.md rename to docs/sources/clients/promtail/installation.md index fdf2dc3fc6f8..f4f791b9a3d2 100644 --- a/docs/clients/promtail/installation.md +++ b/docs/sources/clients/promtail/installation.md @@ -1,3 +1,6 @@ +--- +title: Installation +--- # Install Promtail Promtail is distributed as a [binary](#binary), [Docker container](#docker), and @@ -69,7 +72,7 @@ spec: hostPath: path: HOST_PATH - name: promtail-config - configMap: + configMap: name: promtail-configmap containers: - name: promtail-container diff --git a/docs/clients/promtail/pipelines.md b/docs/sources/clients/promtail/pipelines.md similarity index 86% rename from docs/clients/promtail/pipelines.md rename to docs/sources/clients/promtail/pipelines.md index 2a0db9e43552..ef4b4f315c90 100644 --- a/docs/clients/promtail/pipelines.md +++ b/docs/sources/clients/promtail/pipelines.md @@ -1,3 +1,6 @@ +--- +title: Pipelines +--- # Pipelines A detailed look at how to set up Promtail to process your log lines, including @@ -22,13 +25,13 @@ stages: condition. Typical pipelines will start with a parsing stage (such as a -[regex](./stages/regex.md) or [json](./stages/json.md) stage) to extract data +[regex](../stages/regex/) or [json](../stages/json/) stage) to extract data from the log line. Then, a series of action stages will be present to do something with that extracted data.
The most common action stage will be a -[labels](./stages/labels.md) stage to turn extracted data into a label. +[labels](../stages/labels/) stage to turn extracted data into a label. -A common stage will also be the [match](./stages/match.md) stage to selectively -apply stages or drop entries based on a [LogQL stream selector and filter expressions](../../logql.md). +A common stage will also be the [match](../stages/match/) stage to selectively +apply stages or drop entries based on a [LogQL stream selector and filter expressions](../../../logql/). Note that pipelines can not currently be used to deduplicate logs; Loki will receive the same log line multiple times if, for example: @@ -196,23 +199,23 @@ given log entry. Parsing stages: - * [docker](./stages/docker.md): Extract data by parsing the log line using the standard Docker format. - * [cri](./stages/cri.md): Extract data by parsing the log line using the standard CRI format. - * [regex](./stages/regex.md): Extract data using a regular expression. - * [json](./stages/json.md): Extract data by parsing the log line as JSON. + * [docker](../stages/docker/): Extract data by parsing the log line using the standard Docker format. + * [cri](../stages/cri/): Extract data by parsing the log line using the standard CRI format. + * [regex](../stages/regex/): Extract data using a regular expression. + * [json](../stages/json/): Extract data by parsing the log line as JSON. Transform stages: - * [template](./stages/template.md): Use Go templates to modify extracted data. + * [template](../stages/template/): Use Go templates to modify extracted data. Action stages: - * [timestamp](./stages/timestamp.md): Set the timestamp value for the log entry. - * [output](./stages/output.md): Set the log line text. - * [labels](./stages/labels.md): Update the label set for the log entry. - * [metrics](./stages/metrics.md): Calculate metrics based on extracted data. - * [tenant](./stages/tenant.md): Set the tenant ID value to use for the log entry. + * [timestamp](../stages/timestamp/): Set the timestamp value for the log entry. + * [output](../stages/output/): Set the log line text. + * [labels](../stages/labels/): Update the label set for the log entry. + * [metrics](../stages/metrics/): Calculate metrics based on extracted data. + * [tenant](../stages/tenant/): Set the tenant ID value to use for the log entry. Filtering stages: - * [match](./stages/match.md): Conditionally run stages based on the label set. + * [match](../stages/match/): Conditionally run stages based on the label set. diff --git a/docs/clients/promtail/scraping.md b/docs/sources/clients/promtail/scraping.md similarity index 99% rename from docs/clients/promtail/scraping.md rename to docs/sources/clients/promtail/scraping.md index 570a596a427e..40c444982b24 100644 --- a/docs/clients/promtail/scraping.md +++ b/docs/sources/clients/promtail/scraping.md @@ -1,3 +1,6 @@ +--- +title: Scraping +--- # Promtail Scraping (Service Discovery) ## File Target Discovery @@ -131,7 +134,7 @@ field from the journal was transformed into a label called `unit` through Here's an example where the `SYSTEMD_UNIT`, `HOSTNAME`, and `SYSLOG_IDENTIFIER` are relabeled for use in Loki. Keep in mind that labels prefixed with `__` will be dropped, so relabeling is required to keep these labels. 
- + ```yaml - job_name: systemd-journal journal: @@ -271,5 +274,5 @@ clients: - [ ] ``` -Refer to [`client_config`](./configuration.md#client_config) from the Promtail +Refer to [`client_config`](../configuration#client_config) from the Promtail Configuration reference for all available options. diff --git a/docs/sources/clients/promtail/stages/_index.md b/docs/sources/clients/promtail/stages/_index.md new file mode 100644 index 000000000000..afe7e4d9eeff --- /dev/null +++ b/docs/sources/clients/promtail/stages/_index.md @@ -0,0 +1,32 @@ +--- +title: Stages +--- +# Stages + +This section is a collection of all stages Promtail supports in a +[Pipeline](../pipelines/). + +Parsing stages: + + * [docker](docker/): Extract data by parsing the log line using the standard Docker format. + * [cri](cri/): Extract data by parsing the log line using the standard CRI format. + * [regex](regex/): Extract data using a regular expression. + * [json](json/): Extract data by parsing the log line as JSON. + * [replace](replace/): Replace data using a regular expression. + +Transform stages: + + * [template](template/): Use Go templates to modify extracted data. + +Action stages: + + * [timestamp](timestamp/): Set the timestamp value for the log entry. + * [output](output/): Set the log line text. + * [labels](labels/): Update the label set for the log entry. + * [metrics](metrics/): Calculate metrics based on extracted data. + * [tenant](tenant/): Set the tenant ID value to use for the log entry. + +Filtering stages: + + * [match](match/): Conditionally run stages based on the label set. + diff --git a/docs/clients/promtail/stages/cri.md b/docs/sources/clients/promtail/stages/cri.md similarity index 98% rename from docs/clients/promtail/stages/cri.md rename to docs/sources/clients/promtail/stages/cri.md index 14c3db4697bb..c4564d357c7e 100644 --- a/docs/clients/promtail/stages/cri.md +++ b/docs/sources/clients/promtail/stages/cri.md @@ -1,3 +1,6 @@ +--- +title: cri +--- # `cri` stage The `cri` stage is a parsing stage that reads the log line using the standard CRI logging format. diff --git a/docs/clients/promtail/stages/docker.md b/docs/sources/clients/promtail/stages/docker.md similarity index 97% rename from docs/clients/promtail/stages/docker.md rename to docs/sources/clients/promtail/stages/docker.md index da0fc7ee05fc..ebd087e08d71 100644 --- a/docs/clients/promtail/stages/docker.md +++ b/docs/sources/clients/promtail/stages/docker.md @@ -1,3 +1,6 @@ +--- +title: docker +--- # `docker` stage The `docker` stage is a parsing stage that reads log lines in the standard diff --git a/docs/clients/promtail/stages/json.md b/docs/sources/clients/promtail/stages/json.md similarity index 96% rename from docs/clients/promtail/stages/json.md rename to docs/sources/clients/promtail/stages/json.md index ba20011ab59d..8b6673305ecd 100644 --- a/docs/clients/promtail/stages/json.md +++ b/docs/sources/clients/promtail/stages/json.md @@ -1,3 +1,6 @@ +--- +title: json +--- # `json` stage The `json` stage is a parsing stage that reads the log line as JSON and accepts @@ -25,7 +28,7 @@ This stage uses the Go JSON unmarshaler, which means non-string types like numbers or booleans will be unmarshaled into those types. The extracted data can hold non-string values and this stage does not do any type conversions; downstream stages will need to perform correct type conversion of these values -as necessary. Please refer to the [the `template` stage](./template.md) for how +as necessary. 
Please refer to the [`template` stage](../template/) for how to do this. If the value extracted is a complex type, such as an array or a JSON object, it diff --git a/docs/clients/promtail/stages/labels.md b/docs/sources/clients/promtail/stages/labels.md similarity index 97% rename from docs/clients/promtail/stages/labels.md rename to docs/sources/clients/promtail/stages/labels.md index 171d818019c5..9d4cf2a1f0db 100644 --- a/docs/clients/promtail/stages/labels.md +++ b/docs/sources/clients/promtail/stages/labels.md @@ -1,3 +1,6 @@ +--- +title: labels +--- # `labels` stage The labels stage is an action stage that takes data from the extracted map and diff --git a/docs/clients/promtail/stages/match.md b/docs/sources/clients/promtail/stages/match.md similarity index 94% rename from docs/clients/promtail/stages/match.md rename to docs/sources/clients/promtail/stages/match.md index 0d257c85b7f0..a380a4924662 100644 --- a/docs/clients/promtail/stages/match.md +++ b/docs/sources/clients/promtail/stages/match.md @@ -1,7 +1,10 @@ +--- +title: match +--- # `match` stage The match stage is a filtering stage that conditionally applies a set of stages -or drop entries when a log entry matches a configurable [LogQL](../../../logql.md) +or drops entries when a log entry matches a configurable [LogQL](../../../../logql/) stream selector and filter expressions. ## Schema @@ -16,10 +19,10 @@ match: # concatenated with job_name using an underscore. [pipeline_name: ] - # Determines what action is taken when the selector matches the log + # Determines what action is taken when the selector matches the log # line. Defaults to keep. When set to drop, entries will be dropped # and no later metrics will be recorded. - # Stages must be not defined when dropping entries. + # Stages must not be defined when dropping entries. [action: | default = "keep"] # Nested set of pipeline stages only if the selector @@ -38,7 +41,7 @@ match: ] ``` -Refer to the [Promtail Configuration Reference](../configuration.md) for the +Refer to the [Promtail Configuration Reference](../../configuration/) for the schema on the various other stages referenced here. ### Example diff --git a/docs/clients/promtail/stages/metrics.md b/docs/sources/clients/promtail/stages/metrics.md similarity index 98% rename from docs/clients/promtail/stages/metrics.md rename to docs/sources/clients/promtail/stages/metrics.md index 7ba827c25639..e4b5874daf51 100644 --- a/docs/clients/promtail/stages/metrics.md +++ b/docs/sources/clients/promtail/stages/metrics.md @@ -1,3 +1,6 @@ +--- +title: metrics +--- # `metrics` stage The `metrics` stage is an action stage that allows for defining and updating @@ -50,7 +53,7 @@ config: # It is an error to specify `count_entry_bytes: true` without specifying `match_all: true` # It is an error to specify `count_entry_bytes: true` without specifying `action: add` [count_entry_bytes: ] - + # Filters down source data and only changes the metric # if the targeted value exactly matches the provided string. # If not present, all data will match. @@ -170,15 +173,15 @@ config: action: add ``` -This pipeline creates a `log_lines_total` counter which increments for every log line received +This pipeline creates a `log_lines_total` counter which increments for every log line received by using the `match_all: true` parameter.
-It also creates a `log_bytes_total` counter which adds the byte size of every log line received +It also creates a `log_bytes_total` counter which adds the byte size of every log line received to the counter by using the `count_entry_bytes: true` parameter. Those two metrics will disappear after 1d if they don't receive new entries, this is useful to reduce the building up of stage metrics. -The combination of these two metric stages will give you two counters to track the volume of +The combination of these two metric stages will give you two counters to track the volume of every log stream in both number of lines and bytes, which can be useful in identifying sources of very high volume, as well as helping understand why you may have too much cardinality. diff --git a/docs/clients/promtail/stages/output.md b/docs/sources/clients/promtail/stages/output.md similarity index 97% rename from docs/clients/promtail/stages/output.md rename to docs/sources/clients/promtail/stages/output.md index 6cf39560e76a..c57d937d894e 100644 --- a/docs/clients/promtail/stages/output.md +++ b/docs/sources/clients/promtail/stages/output.md @@ -1,3 +1,6 @@ +--- +title: output +--- # `output` stage The `output` stage is an action stage that takes data from the extracted map and diff --git a/docs/clients/promtail/stages/regex.md b/docs/sources/clients/promtail/stages/regex.md similarity index 99% rename from docs/clients/promtail/stages/regex.md rename to docs/sources/clients/promtail/stages/regex.md index 0df77350b0b3..44cb627abca6 100644 --- a/docs/clients/promtail/stages/regex.md +++ b/docs/sources/clients/promtail/stages/regex.md @@ -1,3 +1,6 @@ +--- +title: regex +--- # `regex` stage The `regex` stage is a parsing stage that parses a log line using a regular diff --git a/docs/clients/promtail/stages/replace.md b/docs/sources/clients/promtail/stages/replace.md similarity index 97% rename from docs/clients/promtail/stages/replace.md rename to docs/sources/clients/promtail/stages/replace.md index ba2ea3b89a4d..e7f25e6cb13f 100644 --- a/docs/clients/promtail/stages/replace.md +++ b/docs/sources/clients/promtail/stages/replace.md @@ -1,3 +1,6 @@ +--- +title: replace +--- # `replace` stage The `replace` stage is a parsing stage that parses a log line using a regular @@ -98,12 +101,12 @@ and replaces the `msg` value. 
`msg` in extracted will now become - `msg`: `11.11.11.11 - "POST /loki/api/v1/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"` -### With `replace` value in `template` format +### With `replace` value in `template` format Given the pipeline: ```yaml -- replace: +- replace: expression: "^(\\S+) (\\S+) (\\S+) \\[([\\w:/]+\\s[+\\-]\\d{4})\\] \"(\\S+)\\s?(\\S+)?\\s?(\\S+)?\" (\\d{3}|-) (\\d+|-)\\s?\"?([^\"]*)\"?\\s?\"?([^\"]*)?\"?$" replace: '{{ if eq .Value "200" }}{{ Replace .Value "200" "HttpStatusOk" -1 }}{{ else }}{{ .Value | ToUpper }}{{ end }}' ``` @@ -122,12 +125,12 @@ The log line would become 11.11.11.11 - frank [25/Jan/2000:14:00:01 -0500] "GET /1986.js HTTP/1.1" HttpStatusOk 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6" ``` -### `replace` with named captured group +### `replace` with named captured group Given the pipeline: ```yaml -- replace: +- replace: expression: "^(?P\\S+) (?P\\S+) (?P\\S+) \\[(?P[\\w:/]+\\s[+\\-]\\d{4})\\] \"(?P\\S+)\\s?(?P\\S+)?\\s?(?P\\S+)?\" (?P\\d{3}|-) (?P\\d+|-)\\s?\"?(?P[^\"]*)\"?\\s?\"?(?P[^\"]*)?\"?$" replace: '{{ .Value | ToUpper }}' ``` @@ -164,7 +167,7 @@ The log line would become Given the pipeline: ```yaml -- replace: +- replace: expression: "^(?P\\S+) (?P\\S+) (\\S+) \\[(?P[\\w:/]+\\s[+\\-]\\d{4})\\] \"(?P\\S+)\\s?(?P\\S+)?\\s?(?P\\S+)?\" (?P\\d{3}|-) (?P\\d+|-)\\s?\"?(?P[^\"]*)\"?\\s?\"?(?P[^\"]*)?\"?$" replace: '{{ .Value | ToUpper }}' ``` diff --git a/docs/clients/promtail/stages/template.md b/docs/sources/clients/promtail/stages/template.md similarity index 93% rename from docs/clients/promtail/stages/template.md rename to docs/sources/clients/promtail/stages/template.md index a6959d4245d1..5d38ed14a0bd 100644 --- a/docs/clients/promtail/stages/template.md +++ b/docs/sources/clients/promtail/stages/template.md @@ -1,3 +1,6 @@ +--- +title: template +--- # `template` stage The `template` stage is a transform stage that lets you manipulate the values in @@ -6,10 +9,10 @@ syntax](https://golang.org/pkg/text/template/). The `template` stage is primarily useful for manipulating data from other stages before setting them as labels, such as to replace spaces with underscores or -converting an uppercase string into a lowercase one. `template` can also be used +converting an uppercase string into a lowercase one. `template` can also be used to construct messages with multiple keys. -The template stage can also create new keys in the extracted map. +The template stage can also create new keys in the extracted map. ## Schema @@ -66,11 +69,11 @@ contained `app` with a value of `LOKI`, this pipeline would change its value to ``` This pipeline takes the current value of `level` and `app` from the extracted map and -a new key `output_msg` will be added to extracted map with evaluated template. +a new key `output_msg` will be added to the extracted map with the evaluated template. -For example, if the extracted map contained `app` with a value of `loki`, this pipeline would change its value to `LOKI`. Assuming value of `level` is `warn`. A new key `output_msg` will be added to extracted map with value `warn for app LOKI`. +For example, if the extracted map contained `app` with a value of `loki`, this pipeline would change its value to `LOKI`. Assuming the value of `level` is `warn`, a new key `output_msg` will be added to the extracted map with the value `warn for app LOKI`. -Any previously extracted keys can be used in `template`. All extracted keys are available for `template` to expand. +Any previously extracted keys can be used in `template`. All extracted keys are available for `template` to expand. ```yaml - template: @@ -79,11 +82,11 @@ Any previously extracted keys can be used in `template`. All extracted keys are ``` This pipeline takes the current value of `level`, `app` and `module` from the extracted map and -converts value of `app` to the evaluated template. +converts the value of `app` to the evaluated template. -For example, if the extracted map contained `app` with a value of `loki`, this pipeline would change its value to `LOKI`. Assuming value of `level` is `warn` and value of `module` is `test`. Pipeline will change the value of `app` to `warn for app LOKI in module test`. +For example, if the extracted map contained `app` with a value of `loki`, this pipeline would change its value to `LOKI`. Assuming the value of `level` is `warn` and the value of `module` is `test`, the pipeline will change the value of `app` to `warn for app LOKI in module test`. -Any previously extracted keys can be used in `template`. All extracted keys are available for `template` to expand. Also, if source is available it can be referred as `.Value` in `template`. Here, `app` is provided as `source`. So, it can be referred as `.Value` in `template`. +Any previously extracted keys can be used in `template`. All extracted keys are available for `template` to expand. Also, if a `source` is provided, it can be referred to as `.Value` in `template`. Here, `app` is provided as `source`, so it can be referred to as `.Value` in `template`. ```yaml - template: diff --git a/docs/clients/promtail/stages/tenant.md b/docs/sources/clients/promtail/stages/tenant.md similarity index 95% rename from docs/clients/promtail/stages/tenant.md rename to docs/sources/clients/promtail/stages/tenant.md index cf0e51e35a0d..8b70868f242d 100644 --- a/docs/clients/promtail/stages/tenant.md +++ b/docs/sources/clients/promtail/stages/tenant.md @@ -1,8 +1,11 @@ +--- +title: tenant +--- # `tenant` stage The tenant stage is an action stage that sets the tenant ID for the log entry picking it from a field in the extracted data map. If the field is missing, the -default promtail client [`tenant_id`](../configuration.md#client_config) will +default promtail client [`tenant_id`](../../configuration#client_config) will be used. diff --git a/docs/clients/promtail/stages/timestamp.md b/docs/sources/clients/promtail/stages/timestamp.md similarity index 99% rename from docs/clients/promtail/stages/timestamp.md rename to docs/sources/clients/promtail/stages/timestamp.md index 3c1de27a949c..b2256e36b5d3 100644 --- a/docs/clients/promtail/stages/timestamp.md +++ b/docs/sources/clients/promtail/stages/timestamp.md @@ -1,3 +1,6 @@ +--- +title: timestamp +--- # `timestamp` stage The `timestamp` stage is an action stage that can change the timestamp of a log diff --git a/docs/clients/promtail/troubleshooting.md b/docs/sources/clients/promtail/troubleshooting.md similarity index 93% rename from docs/clients/promtail/troubleshooting.md rename to docs/sources/clients/promtail/troubleshooting.md index ab22877a1d9b..3cde9bb2f0a0 100644 --- a/docs/clients/promtail/troubleshooting.md +++ b/docs/sources/clients/promtail/troubleshooting.md @@ -1,3 +1,6 @@ +--- +title: Troubleshooting +--- # Troubleshooting Promtail This document describes known failure modes of `promtail` on edge cases and the
Promtail can be configured to print log stream entries instead of sending them to Loki. This can be used in combination with [piping data](#pipe-data-to-promtail) to debug or troubleshoot promtail log parsing. -In dry run mode, Promtail still support reading from a [positions](configuration.md#position_config) file however no update will be made to the targeted file, this is to ensure you can easily retry the same set of lines. +In dry run mode, Promtail still support reading from a [positions](../configuration#position_config) file however no update will be made to the targeted file, this is to ensure you can easily retry the same set of lines. To start Promtail in dry run mode use the flag `--dry-run` as shown in the example below: @@ -36,9 +39,9 @@ This will add labels `k1` and `k2` with respective values `v1` and `v2`. In pipe mode Promtail also support file configuration using `--config.file`, however do note that positions config is not used and only **the first scrape config is used**. -[`static_configs:`](./configuration) can be used to provide static labels, although the targets property is ignored. +[`static_configs:`](../configuration) can be used to provide static labels, although the targets property is ignored. -If you don't provide any [`scrape_config:`](./configuration#scrape_config) a default one is used which will automatically adds the following default labels: `{job="stdin",hostname=""}`. +If you don't provide any [`scrape_config:`](../configuration#scrape_config) a default one is used which will automatically adds the following default labels: `{job="stdin",hostname=""}`. For example you could use this config below to parse and add the label `level` on all your piped logs: @@ -160,7 +163,7 @@ sent again to the ingester on `promtail` restart. However, it's important to note that Loki will reject all log lines received in what it perceives is [out of -order](../../overview/README.md#timestamp-ordering). If `promtail` happens to +order](../../../overview#timestamp-ordering). If `promtail` happens to crash, it may re-send log lines that were sent prior to the crash. The default behavior of Promtail is to assign a timestamp to logs at the time it read the entry from the tailed file. This would result in duplicate log lines being sent diff --git a/docs/sources/community/_index.md b/docs/sources/community/_index.md new file mode 100644 index 000000000000..e3f9501046c8 --- /dev/null +++ b/docs/sources/community/_index.md @@ -0,0 +1,9 @@ +--- +title: Community +weight: 1100 +--- +# Community + +1. [Governance](governance/) +2. [Getting in Touch](getting-in-touch/) +3. [Contributing](contributing/) diff --git a/docs/community/contributing.md b/docs/sources/community/contributing.md similarity index 92% rename from docs/community/contributing.md rename to docs/sources/community/contributing.md index cc0a7e1e3b38..4e479cf13ee9 100644 --- a/docs/community/contributing.md +++ b/docs/sources/community/contributing.md @@ -1,3 +1,6 @@ +--- +title: Contributing to Loki +--- # Contributing to Loki Loki uses [GitHub](https://github.com/grafana/loki) to manage reviews of pull requests: @@ -57,4 +60,4 @@ $ make loki promtail logcli ## Contribute to the Helm Chart -Please follow the [Helm documentation](../../production/helm/README.md). +Please follow the [Helm documentation](https://github.com/grafana/loki/tree/master/production/helm/). 
diff --git a/docs/community/getting-in-touch.md b/docs/sources/community/getting-in-touch.md similarity index 94% rename from docs/community/getting-in-touch.md rename to docs/sources/community/getting-in-touch.md index 51531d528db3..10e6789eade0 100644 --- a/docs/community/getting-in-touch.md +++ b/docs/sources/community/getting-in-touch.md @@ -1,3 +1,6 @@ +--- +title: Contacting the Loki Team +--- # Contacting the Loki Team If you have any questions or feedback regarding Loki: diff --git a/docs/community/governance.md b/docs/sources/community/governance.md similarity index 99% rename from docs/community/governance.md rename to docs/sources/community/governance.md index 874e77f907b9..fde230248f13 100644 --- a/docs/community/governance.md +++ b/docs/sources/community/governance.md @@ -1,6 +1,9 @@ +--- +title: Governance +--- # Governance -This document describes the rules and governance of the project. It is meant to be followed by all the developers of the project and the Loki community. Common terminology used in this governance document are listed below: +This document describes the rules and governance of the project. It is meant to be followed by all the developers of the project and the Loki community. Common terminology used in this governance document are listed below: * **Team members**: Any members of the private [team mailing list][team]. diff --git a/docs/configuration/README.md b/docs/sources/configuration/_index.md similarity index 96% rename from docs/configuration/README.md rename to docs/sources/configuration/_index.md index 9f143a835c5a..e3225ed682f0 100644 --- a/docs/configuration/README.md +++ b/docs/sources/configuration/_index.md @@ -1,36 +1,41 @@ +--- +title: Configuration +weight: 500 +--- # Configuring Loki Loki is configured in a YAML file (usually referred to as `loki.yaml` ) which contains information on the Loki server and its individual components, depending on which mode Loki is launched in. -Configuration examples can be found in the [Configuration Examples](./examples.md) document. - -* [Printing Loki Config At Runtime](#printing-loki-config-at-runtime) -* [Configuration File Reference](#configuration-file-reference) -* [server_config](#server_config) -* [distributor_config](#distributor_config) -* [querier_config](#querier_config) -* [query_frontend_config](#query_frontend_config) -* [queryrange_config](#queryrange_config) -* [frontend_worker_config](#frontend_worker_config) -* [ingester_client_config](#ingester_client_config) -* [ingester_config](#ingester_config) -* [consul_config](#consul_config) -* [etcd_config](#etcd_config) -* [memberlist_config](#memberlist_config) -* [storage_config](#storage_config) -* [chunk_store_config](#chunk_store_config) -* [cache_config](#cache_config) -* [schema_config](#schema_config) -* [period_config](#period_config) -* [limits_config](#limits_config) -* [grpc_client_config](#grpc_client_config) -* [table_manager_config](#table_manager_config) -* [provision_config](#provision_config) -* [auto_scaling_config](#auto_scaling_config) -* [tracing_config](#tracing_config) -* [Runtime Configuration file](#runtime-configuration-file) +Configuration examples can be found in the [Configuration Examples](examples/) document. 
+ +- [Configuring Loki](#configuring-loki) + - [Printing Loki Config At Runtime](#printing-loki-config-at-runtime) + - [Configuration File Reference](#configuration-file-reference) + - [server_config](#server_config) + - [distributor_config](#distributor_config) + - [querier_config](#querier_config) + - [query_frontend_config](#query_frontend_config) + - [queryrange_config](#queryrange_config) + - [`frontend_worker_config`](#frontend_worker_config) + - [ingester_client_config](#ingester_client_config) + - [ingester_config](#ingester_config) + - [consul_config](#consul_config) + - [etcd_config](#etcd_config) + - [memberlist_config](#memberlist_config) + - [storage_config](#storage_config) + - [chunk_store_config](#chunk_store_config) + - [cache_config](#cache_config) + - [schema_config](#schema_config) + - [period_config](#period_config) + - [limits_config](#limits_config) + - [grpc_client_config](#grpc_client_config) + - [table_manager_config](#table_manager_config) + - [provision_config](#provision_config) + - [auto_scaling_config](#auto_scaling_config) + - [tracing_config](#tracing_config) + - [Runtime Configuration file](#runtime-configuration-file) ## Printing Loki Config At Runtime @@ -40,15 +45,15 @@ overrides from config file, and second by overrides from flags. The result is the value for every config object in the Loki config struct, which is very large... -Many values will not be relevant to your install such as storage configs which you are not using and which you did not define, +Many values will not be relevant to your install such as storage configs which you are not using and which you did not define, this is expected as every option has a default value if it is being used or not. This config is what Loki will use to run, it can be invaluable for debugging issues related to configuration and is especially useful in making sure your config files and flags are being read and loaded properly. -`-print-config-stderr` is nice when running Loki directly e.g. `./loki ` as you can get a quick output of the entire Loki config. +`-print-config-stderr` is nice when running Loki directly e.g. `./loki ` as you can get a quick output of the entire Loki config. -`-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so +`-log-config-reverse-order` is the flag we run Loki with in all our environments, the config entries are reversed so that the order of configs reads correctly top to bottom when viewed in Grafana's Explore. ## Configuration File Reference @@ -60,12 +65,12 @@ non-list parameters the value is set to the specified default. Generic placeholders are defined as follows: -* `` : a boolean that can take the values `true` or `false` -* `` : any integer matching the regular expression `[1-9]+[0-9]*` -* `` : a duration matching the regular expression `[0-9]+(ns|us|µs|ms|[smh])` -* `` : a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*` +* `` : a boolean that can take the values `true` or `false` +* `` : any integer matching the regular expression `[1-9]+[0-9]*` +* `` : a duration matching the regular expression `[0-9]+(ns|us|µs|ms|[smh])` +* `` : a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*` * `` : a string of unicode characters -* `` : a valid path relative to current working directory or an absolute path. +* `` : a valid path relative to current working directory or an absolute path. 
* `` : a valid string consisting of a hostname or IP followed by an optional port number * `` : a regular string * `` : a regular string that is a secret, such as a password @@ -220,7 +225,7 @@ ring: # Configuration for an ETCD v3 client. Only applies if store is "etcd" # The CLI flags prefix for this block config is: distributor.ring [etcd: ] - + # Configuration for Gossip memberlist. Only applies if store is "memberlist" # The CLI flags prefix for this block config is: distributor.ring [memberlist: ] @@ -327,7 +332,7 @@ results_cache: [parallelise_shardable_queries: | default = false] ``` -## `frontend_worker_config` +## `frontend_worker_config` The `frontend_worker_config` configures the worker - running within the Loki querier - picking up and executing queries enqueued by the query-frontend. @@ -360,7 +365,7 @@ pool_config: # CLI flag: -distributor.health-check-ingesters [health_check_ingesters: | default = false] - # How frequently to clean up clients for servers that have gone away after + # How frequently to clean up clients for servers that have gone away after # a health check. # CLI flag: -distributor.client-cleanup-period [client_cleanup_period: | default = 15s] @@ -515,10 +520,10 @@ lifecycler: # CLI flag: -ingester.max-chunk-age [max_chunk_age: | default = 1h] -# How far in the past an ingester is allowed to query the store for data. +# How far in the past an ingester is allowed to query the store for data. # This is only useful for running multiple loki binaries with a shared ring with a `filesystem` store which is NOT shared between the binaries # When using any "shared" object store like S3 or GCS this value must always be left as 0 -# It is an error to configure this to a non-zero value when using any object store other than `filesystem` +# It is an error to configure this to a non-zero value when using any object store other than `filesystem` # Use a value of -1 to allow the ingester to query the store infinitely far back in time. # CLI flag: -ingester.query-store-max-look-back-period [query_store_max_look_back_period: | default = 0] @@ -678,8 +683,8 @@ block. # Configures storing chunks in AWS. Required options only required when aws is # present. aws: - # S3 or S3-compatible endpoint URL with escaped Key and Secret encoded. - # If only region is specified as a host, the proper endpoint will be deduced. + # S3 or S3-compatible endpoint URL with escaped Key and Secret encoded. + # If only region is specified as a host, the proper endpoint will be deduced. # Use inmemory:/// to use a mock in-memory implementation. # CLI flag: -s3.url [s3: ] @@ -1363,11 +1368,11 @@ The `provision_config` block configures provisioning capacity for DynamoDB. ```yaml # Enables on-demand throughput provisioning for the storage # provider, if supported. Applies only to tables which are not autoscaled. -# CLI flag: -.enable-ondemand-throughput-mode +# CLI flag: -.enable-ondemand-throughput-mode [enable_ondemand_throughput_mode: | default = false] # DynamoDB table default write throughput. -# CLI flag: -.write-throughput +# CLI flag: -.write-throughput [provisioned_write_throughput: | default = 3000] # DynamoDB table default read throughput. @@ -1376,19 +1381,19 @@ The `provision_config` block configures provisioning capacity for DynamoDB. # Enables on-demand throughput provisioning for the storage provide, # if supported. Applies only to tables which are not autoscaled. 
-# CLI flag: -.inactive-enable-ondemand-throughput-mode +# CLI flag: -.inactive-enable-ondemand-throughput-mode [enable_inactive_throughput_on_demand_mode: | default = false] # DynamoDB table write throughput for inactive tables. -# CLI flag: -.inactive-write-throughput +# CLI flag: -.inactive-write-throughput [inactive_write_throughput: | default = 1] # DynamoDB table read throughput for inactive tables. -# CLI flag: -.inactive-read-throughput +# CLI flag: -.inactive-read-throughput [inactive_read_throughput: | Default = 300] # Active table write autoscale config. -# The CLI flags prefix for this block config is: -.write-throughput +# The CLI flags prefix for this block config is: -.write-throughput [write_scale: ] # Inactive table write autoscale config. @@ -1396,7 +1401,7 @@ The `provision_config` block configures provisioning capacity for DynamoDB. [inactive_write_scale: ] # Number of last inactive tables to enable write autoscale. -# CLI flag: -.enable-ondemand-throughput-mode +# CLI flag: -.enable-ondemand-throughput-mode [inactive_write_scale_lastn: ] # Active table read autoscale config. @@ -1408,7 +1413,7 @@ The `provision_config` block configures provisioning capacity for DynamoDB. [inactive_read_scale: ] # Number of last inactive tables to enable read autoscale. -# CLI flag: -.enable-ondemand-throughput-mode +# CLI flag: -.enable-ondemand-throughput-mode [inactive_read_scale_lastn: ] ``` diff --git a/docs/configuration/examples.md b/docs/sources/configuration/examples.md similarity index 87% rename from docs/configuration/examples.md rename to docs/sources/configuration/examples.md index b40e152fc54d..9b569f65f359 100644 --- a/docs/configuration/examples.md +++ b/docs/sources/configuration/examples.md @@ -1,12 +1,18 @@ +--- +title: Examples +--- # Loki Configuration Examples -1. [Complete Local Config](#complete-local-config) -2. [Google Cloud Storage](#google-cloud-storage) -3. [Cassandra Index](#cassandra-index) -4. [AWS](#aws) -5. [Almost zero dependencies setup with Memberlist and BoltDB Shipper](#almost-zero-dependencies-setup) -6. [Schema config to migrate to new changes such as store, schema, index period etc..](#schema_config) -7. [Using the query-frontend](#query-frontend) +- [Loki Configuration Examples](#loki-configuration-examples) + - [Complete Local config](#complete-local-config) + - [Google Cloud Storage](#google-cloud-storage) + - [Cassandra Index](#cassandra-index) + - [AWS](#aws) + - [S3-compatible APIs](#s3-compatible-apis) + - [S3 Expanded Config](#s3-expanded-config) + - [Almost zero dependencies setup](#almost-zero-dependencies-setup) + - [schema_config](#schema_config) + - [Query Frontend](#query-frontend) ## Complete Local config @@ -151,7 +157,7 @@ storage_config: ### S3 Expanded Config S3 config now supports expanded config. Either `s3` endpoint URL can be used -or expanded config can be used. +or expanded config can be used. ```yaml storage_config: @@ -161,7 +167,7 @@ storage_config: access_key_id: s3_access_key_id secret_access_key: s3_secret_access_key insecure: false - sse_encryption: false + sse_encryption: false http_config: idle_conn_timeout: 90s response_header_timeout: 0s @@ -173,7 +179,7 @@ storage_config: This is a configuration to deploy Loki depending only on storage solution, e.g. an S3-compatible API like minio. The ring configuration is based on the gossip memberlist -and the index is shipped to storage via [boltdb-shipper](../operations/storage/boltdb-shipper.md). 
+and the index is shipped to storage via [boltdb-shipper](../../operations/storage/boltdb-shipper/). ```yaml auth_enabled: false @@ -243,8 +249,8 @@ limits_config: ```yaml configs: - # Starting from 2018-04-15 Loki should store indexes on Cassandra - # using weekly periodic tables and chunks on filesystem. + # Starting from 2018-04-15 Loki should store indexes on Cassandra + # using weekly periodic tables and chunks on filesystem. # The index tables will be prefixed with "index_". - from: "2018-04-15" store: cassandra @@ -266,4 +272,4 @@ configs: ## Query Frontend -[example configuration](./query-frontend.md) +[example configuration](../query-frontend/) diff --git a/docs/configuration/query-frontend.md b/docs/sources/configuration/query-frontend.md similarity index 98% rename from docs/configuration/query-frontend.md rename to docs/sources/configuration/query-frontend.md index 2fcd2e8b1188..aaffa009bf74 100644 --- a/docs/configuration/query-frontend.md +++ b/docs/sources/configuration/query-frontend.md @@ -1,3 +1,6 @@ +--- +title: Query Frontend +--- ## Kubernetes Query Frontend Example ### Disclaimer @@ -41,7 +44,7 @@ data: align_queries_with_step: true max_retries: 5 # parallelize queries in 15min intervals - split_queries_by_interval: 15m + split_queries_by_interval: 15m cache_results: true results_cache: diff --git a/docs/design-documents/2020-02-Promtail-Push-API.md b/docs/sources/design-documents/2020-02-Promtail-Push-API.md similarity index 99% rename from docs/design-documents/2020-02-Promtail-Push-API.md rename to docs/sources/design-documents/2020-02-Promtail-Push-API.md index f7028a5fd0e3..a6f8134e7f48 100644 --- a/docs/design-documents/2020-02-Promtail-Push-API.md +++ b/docs/sources/design-documents/2020-02-Promtail-Push-API.md @@ -1,3 +1,6 @@ +--- +title: Promtail Push API +--- # Promtail Push API - Author: Robert Fratto (@rfratto) diff --git a/docs/sources/design-documents/_index.md b/docs/sources/design-documents/_index.md new file mode 100644 index 000000000000..de4bb396a9ff --- /dev/null +++ b/docs/sources/design-documents/_index.md @@ -0,0 +1,4 @@ +--- +title: Design documents +menu_exclude: true +--- \ No newline at end of file diff --git a/docs/design-documents/labels.md b/docs/sources/design-documents/labels.md similarity index 95% rename from docs/design-documents/labels.md rename to docs/sources/design-documents/labels.md index 6a885c22fa65..96b95084638e 100644 --- a/docs/design-documents/labels.md +++ b/docs/sources/design-documents/labels.md @@ -1,5 +1,8 @@ +--- +title: Labels +--- # Labels from Logs -Author: Ed Welch +Author: Ed Welch Date: February 2019 This is the official version of this doc as of 2019/04/03, the original discussion was had via a [Google doc](https://docs.google.com/document/d/16y_XFux4h2oQkJdfQgMjqu3PUxMBAq71FoKC_SkHzvk/edit?usp=sharing), which is being kept for posterity but will not be updated moving forward. @@ -10,7 +13,7 @@ We should be able to filter logs by labels extracted from log content. Keeping in mind: Loki is not a log search tool and we need to discourage the use of log labels as an attempt to recreate log search functionality. Having a label on “order number” would be bad, however, having a label on “orderType=plant” and then filtering the results on a time window with an order number would be fine. 
(think: grep “plant” | grep “12324134” ) -Loki as a grep replacement, log tailing or log scrolling tool is highly desirable, log labels will be useful in reducing query results and improving query performance, combined with logQL to narrow down results. +Loki as a grep replacement, log tailing or log scrolling tool is highly desirable, log labels will be useful in reducing query results and improving query performance, combined with logQL to narrow down results. ## Use Cases @@ -22,7 +25,7 @@ Examples: ## Challenges -* Logs are often unstructured data, it can be very difficult to extract reliable data from some unstructured formats, often requiring the use of complicated regular expressions. +* Logs are often unstructured data, it can be very difficult to extract reliable data from some unstructured formats, often requiring the use of complicated regular expressions. * Easy to abuse. Easy to create a Label with high cardinality, even possibly by accident with a rogue regular expression. * Where do we extract metrics and labels at the client (Promtail or other?) or Loki? Extraction at the server (Loki) side has some pros/cons. Can we do both? At least with labels we could define a set of expected labels and if loki doesn’t receive them they could be extracted. * Server side extraction would improve interoperability at the expense of increase server workload and cost. @@ -44,13 +47,13 @@ https://github.com/fstab/grok_exporter If you are familiar with Grok this would be more comfortable, many people use ELK stacks and would likely be familiar with or already have Grok strings for their logs, making it easy to use grok_exporter to extract metrics. -One caveat is the dependency on the oniguruma C library which parses the regular expressions. +One caveat is the dependency on the oniguruma C library which parses the regular expressions. ## Implementation ### Details -As mentioned previously in the challenges for working with unstructured data, there isn’t a good one size fits all solution for extracting structured data. +As mentioned previously in the challenges for working with unstructured data, there isn’t a good one size fits all solution for extracting structured data. The Docker log format is an example where multiple levels of processing may be required, where the docker log is json, however, it also contains the log message field which itself could be embedded json, or a log message which needs regex parsing. @@ -90,7 +93,7 @@ Our pipelined config might look like this: scrape_configs: - job_name: system entry_parsers: - - json: + - json: timestamp: source: time format: RFC3339 @@ -110,20 +113,20 @@ scrape_configs: Looking at this a little closer: ```yaml - - json: - timestamp: - source: time + - json: + timestamp: + source: time format: TODO ① labels: - stream: + stream: source: json_key_name.json_sub_key_name ② output: log ③ ``` -① The format key will likely be a format string for Go’s time.Parse or a format string for strptime, this still needs to be decided, but the idea would be to specify a format string used to extract the timestamp data, for the regex parser there would also need to be a expr key used to extract the timestamp. -② One of the json elements was “stream” so we extract that as a label, if the json value matches the desired label name it should only be required to specify the label name as a key, if some mapping is required you can optionally provide a “source” key to specify where to find the label in the document. 
(Note the use of `json_key_name.json_sub_key_name` is just an example here and doesn't match our example log) -③ Tell the pipeline which element from the json to send to the next stage. +① The format key will likely be a format string for Go’s time.Parse or a format string for strptime, this still needs to be decided, but the idea would be to specify a format string used to extract the timestamp data, for the regex parser there would also need to be a expr key used to extract the timestamp. +② One of the json elements was “stream” so we extract that as a label, if the json value matches the desired label name it should only be required to specify the label name as a key, if some mapping is required you can optionally provide a “source” key to specify where to find the label in the document. (Note the use of `json_key_name.json_sub_key_name` is just an example here and doesn't match our example log) +③ Tell the pipeline which element from the json to send to the next stage. ```yaml - regex: @@ -132,8 +135,8 @@ Looking at this a little closer: level: ② ``` -① Define the Go RE2 regex, making sure to use a named capture group. -② Extract labels using the named capture group names. +① Define the Go RE2 regex, making sure to use a named capture group. +② Extract labels using the named capture group names. Notice there was not an output section defined here, omitting the output key should instruct the parser to return the incoming log message to the next stage with no changes. @@ -151,7 +154,7 @@ There is an alternative configuration that could be used here to accomplish the scrape_configs: - job_name: system entry_parsers: - - json: + - json: timestamp: source: time format: FIXME @@ -167,7 +170,7 @@ scrape_configs: output: message ``` -① Similar to the json parser, if your log label matches the regex named group, you need only specify the label name as a yaml key +① Similar to the json parser, if your log label matches the regex named group, you need only specify the label name as a yaml key ② If you had a use case for specifying a different label name from the regex group name you can optionally provide the `source` key with the value matching the named capture group. You can define a more complicated regular expression with multiple capture groups to extract many labels and/or the output log message in one entry parser. This has the advantage of being more performant, however, the regular expression will also get much more complicated. @@ -175,8 +178,8 @@ You can define a more complicated regular expression with multiple capture group Please also note the regex for `message` is incomplete and would do a terrible job of matching any standard log message which might contain spaces or non alpha characters. ### Concerns -* Debugging, especially if a pipeline stage is mutating the log entry. -* Clashing labels and how to handle this (two stages try to set the same label) +* Debugging, especially if a pipeline stage is mutating the log entry. 
+* Clashing labels and how to handle this (two stages try to set the same label) * Performance vs ease of writing/use, if every label is extracted one at a time and there are a lot of labels and a long line, it would force reading the line many times, however contrast this to a really long complicated regex which only has to read the line once but is difficult to write and/or change and maintain ### Further improvements diff --git a/docs/sources/getting-started/_index.md b/docs/sources/getting-started/_index.md new file mode 100644 index 000000000000..81f57ae63038 --- /dev/null +++ b/docs/sources/getting-started/_index.md @@ -0,0 +1,11 @@ +--- +title: Getting started +weight: 300 +--- +# Getting started with Loki + +1. [Grafana](grafana/) +2. [LogCLI](logcli/) +3. [Labels](labels/) +4. [Troubleshooting](troubleshooting/) + diff --git a/docs/getting-started/get-logs-into-loki.md b/docs/sources/getting-started/get-logs-into-loki.md similarity index 89% rename from docs/getting-started/get-logs-into-loki.md rename to docs/sources/getting-started/get-logs-into-loki.md index 572fadd69a7f..27b61a96c6a8 100644 --- a/docs/getting-started/get-logs-into-loki.md +++ b/docs/sources/getting-started/get-logs-into-loki.md @@ -1,10 +1,13 @@ +--- +title: Get logs into Loki +--- # Get logs into Loki -After you [install and run Loki](./installation/local.md), you probably want to get logs from other applications into it. +After you [install and run Loki](../../installation/local/), you probably want to get logs from other applications into it. -To get application logs into Loki, you need to edit the [Promtail](./clients/promtail/README.md) config file. +To get application logs into Loki, you need to edit the [Promtail]({{< relref "../clients/promtail" >}}) config file. -Detailed information about configuring Promtail is available in [Promtail configuration](./clients/promtail/configuration.md). +Detailed information about configuring Promtail is available in [Promtail configuration](../../clients/promtail/configuration/). The following instructions should help you get started. @@ -15,7 +18,7 @@ wget https://github.com/grafana/loki/blob/master/cmd/promtail/promtail-local-con ``` 2. Open the config file in the text editor of your choice. It should look similar to this: - + ``` server: http_listen_port: 9080 @@ -39,8 +42,8 @@ scrape_configs: The seven lines under `scrape_configs` are what send the logs that Loki generates to Loki, which then outputs them in the command line and http://localhost:3100/metrics. -3. Copy the seven lines under `scrape_configs`, and then paste them under the original job (you can also just edit the original seven lines). - +3. Copy the seven lines under `scrape_configs`, and then paste them under the original job (you can also just edit the original seven lines). + Below is an example that sends logs from a default Grafana installation to Loki. We updated the following fields: - job_name - This differentiates the logs collected from other log groups. - targets - Optional for static_configs, however is often defined because in older versions of Promtail it was not optional. This was an artifact from directly using the Prometheus service discovery code which required this entry. 
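As a rough sketch, the added job could end up looking something like this; the `__path__` value is an assumption that depends on where your Grafana installation writes its logs:

```
- job_name: grafana
  static_configs:
  - targets:
      - localhost
    labels:
      job: grafana
      # Hypothetical location of the Grafana log file; adjust for your system.
      __path__: /var/log/grafana/grafana.log
```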
diff --git a/docs/getting-started/grafana.md b/docs/sources/getting-started/grafana.md similarity index 97% rename from docs/getting-started/grafana.md rename to docs/sources/getting-started/grafana.md index c6e6408abbba..299aa8d17680 100644 --- a/docs/getting-started/grafana.md +++ b/docs/sources/getting-started/grafana.md @@ -1,3 +1,6 @@ +--- +title: Loki in Grafana +--- # Loki in Grafana Grafana ships with built-in support for Loki for versions greater than diff --git a/docs/getting-started/labels.md b/docs/sources/getting-started/labels.md similarity index 98% rename from docs/getting-started/labels.md rename to docs/sources/getting-started/labels.md index b72e8f2c1483..e83356c34da9 100644 --- a/docs/getting-started/labels.md +++ b/docs/sources/getting-started/labels.md @@ -1,3 +1,6 @@ +--- +title: Labels +--- # Labels Labels are key value pairs and can be defined as anything! We like to refer to them as metadata to describe a log stream. If you are familiar with Prometheus, there are a few labels you are used to seeing like `job` and `instance`, and I will use those in the coming examples. @@ -130,7 +133,7 @@ The two previous examples use statically defined labels with a single value; how __path__: /var/log/apache.log ``` -This regex matches every component of the log line and extracts the value of each component into a capture group. Inside the pipeline code, this data is placed in a temporary data structure that allows using it for several purposes during the processing of that log line (at which point that temp data is discarded). Much more detail about this can be found [here](../clients/promtail/pipelines.md). +This regex matches every component of the log line and extracts the value of each component into a capture group. Inside the pipeline code, this data is placed in a temporary data structure that allows using it for several purposes during the processing of that log line (at which point that temp data is discarded). Much more detail about this can be found [here](../../clients/promtail/pipelines/). From that regex, we will be using two of the capture groups to dynamically set two labels based on content from the log line itself: @@ -185,7 +188,7 @@ Now let's talk about Loki, where the index is typically an order of magnitude sm Loki will effectively keep your static costs as low as possible (index size and memory requirements as well as static log storage) and make the query performance something you can control at runtime with horizontal scaling. -To see how this works, let's look back at our example of querying your access log data for a specific IP address. We don't want to use a label to store the IP. Instead we use a [filter expression](../logql.md#filter-expression) to query for it: +To see how this works, let's look back at our example of querying your access log data for a specific IP address. We don't want to use a label to store the IP. 
Instead we use a [filter expression](../../logql#filter-expression) to query for it:

```
{job=”apache”} |= “11.11.11.11”
```

diff --git a/docs/getting-started/logcli.md b/docs/sources/getting-started/logcli.md
similarity index 99%
rename from docs/getting-started/logcli.md
rename to docs/sources/getting-started/logcli.md
index ccbdc8263a35..d4c686d90248 100644
--- a/docs/getting-started/logcli.md
+++ b/docs/sources/getting-started/logcli.md
@@ -1,3 +1,6 @@
+---
+title: LogCLI
+---
# Querying Loki with LogCLI

If you prefer a command line interface, LogCLI also allows users to run LogQL
diff --git a/docs/getting-started/troubleshooting.md b/docs/sources/getting-started/troubleshooting.md
similarity index 95%
rename from docs/getting-started/troubleshooting.md
rename to docs/sources/getting-started/troubleshooting.md
index 579c70f77f39..c5f0dff142d0 100644
--- a/docs/getting-started/troubleshooting.md
+++ b/docs/sources/getting-started/troubleshooting.md
@@ -1,3 +1,6 @@
+---
+title: Troubleshooting
+---
# Troubleshooting Loki

## "Loki: Bad Gateway. 502"
@@ -43,11 +46,11 @@ Promtail yet. There may be one of many root causes:

Promtail exposes two web pages that can be used to understand how its service
discovery works.

The service discovery page (`/service-discovery`) shows all
discovered targets with their labels before and after relabeling as well as
the reason why the target has been dropped.

The targets page (`/targets`) displays only targets that are being actively
scraped and their respective labels, files, and positions.

On Kubernetes, you can access those two pages by port-forwarding the Promtail
@@ -106,7 +109,7 @@ kubectl exec -it promtail-bth9q -- /bin/sh

Once connected, verify the config in `/etc/promtail/promtail.yml` has the
contents you expect.

Also check `/var/log/positions.yaml` (`/run/promtail/positions.yaml` when
deployed by Helm or whatever value is specified for `positions.file`) and make
sure Promtail is tailing the logs you would expect.

diff --git a/docs/installation/README.md b/docs/sources/installation/_index.md
similarity index 58%
rename from docs/installation/README.md
rename to docs/sources/installation/_index.md
index 9ba4e0b52fcd..68f5e3042a2d 100644
--- a/docs/installation/README.md
+++ b/docs/sources/installation/_index.md
@@ -1,14 +1,18 @@
+---
+title: Installation
+weight: 200
+---
# Install Loki

## Installation methods

Instructions for different methods of installing Loki and Promtail.
-- [Install using Tanka (recommended)](./tanka.md) -- [Install through Helm](./helm.md) -- [Install through Docker or Docker Compose](./docker.md) -- [Install and run locally](./local.md) -- [Install from source](./install-from-source.md) +- [Install using Tanka (recommended)](tanka/) +- [Install through Helm](helm/) +- [Install through Docker or Docker Compose](docker/) +- [Install and run locally](local/) +- [Install from source](install-from-source/) ## General process diff --git a/docs/installation/docker.md b/docs/sources/installation/docker.md similarity index 99% rename from docs/installation/docker.md rename to docs/sources/installation/docker.md index 3c8e077149e3..f7e2bb1d008d 100644 --- a/docs/installation/docker.md +++ b/docs/sources/installation/docker.md @@ -1,3 +1,6 @@ +--- +title: Docker +--- # Install Loki with Docker or Docker Compose You can install Loki and Promtail with Docker or Docker Compose if you are evaluating, testing, or developing Loki. diff --git a/docs/installation/helm.md b/docs/sources/installation/helm.md similarity index 96% rename from docs/installation/helm.md rename to docs/sources/installation/helm.md index 31d9f70bf4cb..e90fc0f78caa 100644 --- a/docs/installation/helm.md +++ b/docs/sources/installation/helm.md @@ -1,9 +1,12 @@ +--- +title: Helm +--- # Install Loki with Helm ## Prerequisites Make sure you have Helm [installed](https://helm.sh/docs/using_helm/#installing-helm) and -[deployed](https://helm.sh/docs/using_helm/#installing-tiller) to your cluster. +[deployed](https://helm.sh/docs/using_helm/#installing-tiller) to your cluster. Add [Loki's chart repository](https://github.com/grafana/loki/tree/master/production/helm/loki) to Helm: @@ -71,7 +74,7 @@ kubectl port-forward --namespace service/loki-grafana 3000:80 ``` Navigate to `http://localhost:3000` and login with `admin` and the password -output above. Then follow the [instructions for adding the Loki Data Source](../getting-started/grafana.md), using the URL +output above. Then follow the [instructions for adding the Loki Data Source](../../getting-started/grafana/), using the URL `http://loki:3100/` for Loki. ## Run Loki behind HTTPS ingress diff --git a/docs/installation/install-from-source.md b/docs/sources/installation/install-from-source.md similarity index 94% rename from docs/installation/install-from-source.md rename to docs/sources/installation/install-from-source.md index 52d9b14e1ab2..90a2c329b67a 100644 --- a/docs/installation/install-from-source.md +++ b/docs/sources/installation/install-from-source.md @@ -1,3 +1,6 @@ +--- +title: Build from source +--- # Build from source In order to build Loki manually, you need to clone the GitHub repo and then `make Loki`. diff --git a/docs/installation/local.md b/docs/sources/installation/local.md similarity index 90% rename from docs/installation/local.md rename to docs/sources/installation/local.md index 4ed82c2102cc..ad3b36756923 100644 --- a/docs/installation/local.md +++ b/docs/sources/installation/local.md @@ -1,3 +1,6 @@ +--- +title: Local +--- # Install and run Loki locally In order to log events with Loki, you must download and install both Promtail and Loki. @@ -9,7 +12,7 @@ In order to log events with Loki, you must download and install both Promtail an 1. Navigate to the [release page](https://github.com/grafana/loki/releases/). 2. Scroll down to the Assets section under the version that you want to install. 3. Download the Loki and Promtail .zip files that correspond to your system. 
- **Note:** Do not download LogCLI or Loki Canary at this time. [LogCLI](../getting-started/logcli.md) allows you to run Loki queries in a command line interface. [Loki Canary](../operations/loki-canary.md) is a tool to audit Loki performance. + **Note:** Do not download LogCLI or Loki Canary at this time. [LogCLI](../../getting-started/logcli/) allows you to run Loki queries in a command line interface. [Loki Canary](../../operations/loki-canary/) is a tool to audit Loki performance. 4. Unzip the package contents into the same directory. This is where the two programs will run. 5. In the command line, change directory (`cd` on most systems) to the directory with Loki and Promtail. Copy and paste the commands below into your command line to download generic configuration files: ``` @@ -31,7 +34,7 @@ wget https://raw.githubusercontent.com/grafana/loki/master/cmd/promtail/promtail Loki runs and displays Loki logs in your command line and on http://localhost:3100/metrics. -Congratulations, Loki is installed and running! Next, you might want edit the Promtail config file to [get logs into Loki](../getting-started/get-logs-into-loki.md). +Congratulations, Loki is installed and running! Next, you might want edit the Promtail config file to [get logs into Loki](../getting-started/get-logs-into-loki/). ## Release binaries - openSUSE Linux only diff --git a/docs/installation/tanka.md b/docs/sources/installation/tanka.md similarity index 99% rename from docs/installation/tanka.md rename to docs/sources/installation/tanka.md index 3a8564e5452a..9b76ce163515 100644 --- a/docs/installation/tanka.md +++ b/docs/sources/installation/tanka.md @@ -1,3 +1,6 @@ +--- +title: Tanka +--- # Install Loki with Tanka [Tanka](https://tanka.dev) is a reimplementation of @@ -44,7 +47,7 @@ loki + promtail + gateway { _config+:: { namespace: 'loki', htpasswd_contents: 'loki:$apr1$H4yGiGNg$ssl5/NymaGFRUvxIV1Nyr.', - + // S3 variables remove if not using aws storage_backend: 's3,dynamodb', s3_access_key: 'key', @@ -52,13 +55,13 @@ loki + promtail + gateway { s3_address: 'url', s3_bucket_name: 'loki-test', dynamodb_region: 'region', - + // GCS variables remove if not using gcs storage_backend: 'bigtable,gcs', bigtable_instance: 'instance', bigtable_project: 'project', gcs_bucket_name: 'bucket', - + promtail_config+: { clients: [{ scheme:: 'http', diff --git a/docs/logo.png b/docs/sources/logo.png similarity index 100% rename from docs/logo.png rename to docs/sources/logo.png diff --git a/docs/logo_and_name.png b/docs/sources/logo_and_name.png similarity index 100% rename from docs/logo_and_name.png rename to docs/sources/logo_and_name.png diff --git a/docs/logql.md b/docs/sources/logql/_index.md similarity index 99% rename from docs/logql.md rename to docs/sources/logql/_index.md index a9fd1b3a3b2f..7b373d7f1305 100644 --- a/docs/logql.md +++ b/docs/sources/logql/_index.md @@ -1,3 +1,7 @@ +--- +title: LogQL +weight: 700 +--- # LogQL: Log Query Language Loki comes with its own PromQL-inspired language for queries called *LogQL*. @@ -40,7 +44,7 @@ In this example, all log streams that have a label of `app` whose value is `mysq Note that this will match any log stream whose labels _at least_ contain `mysql-backup` for their name label; if there are multiple streams that contain that label, logs from all of the matching streams will be shown in the results. -The `=` operator after the label name is a **label matching operator**. +The `=` operator after the label name is a **label matching operator**. 
The following label matching operators are supported: - `=`: exactly equal. @@ -66,7 +70,7 @@ The search expression can be just text or regex: - `` {name="cassandra"} |~ `error=\w+` `` - `{instance=~"kafka-[23]",name="kafka"} != "kafka.server:type=ReplicaManager"` -In the previous examples, `|=`, `|~`, and `!=` act as **filter operators**. +In the previous examples, `|=`, `|~`, and `!=` act as **filter operators**. The following filter operators are supported: - `|=`: Log line contains string. diff --git a/docs/sources/maintaining/_index.md b/docs/sources/maintaining/_index.md new file mode 100644 index 000000000000..3ef53e47e76c --- /dev/null +++ b/docs/sources/maintaining/_index.md @@ -0,0 +1,10 @@ +--- +title: Maintaining +weight: 1200 +--- +# Loki Maintainers Guide + +This section details information for maintainers of Loki. + +1. [Releasing Loki](release/) +2. [Releasing `loki-build-image`](release-loki-build-image/) diff --git a/docs/maintaining/release-loki-build-image.md b/docs/sources/maintaining/release-loki-build-image.md similarity index 69% rename from docs/maintaining/release-loki-build-image.md rename to docs/sources/maintaining/release-loki-build-image.md index d616caaa217a..253e96fa4d85 100644 --- a/docs/maintaining/release-loki-build-image.md +++ b/docs/sources/maintaining/release-loki-build-image.md @@ -1,6 +1,9 @@ +--- +title: Releasing Loki Build Image +--- # Releasing `loki-build-image` -The [`loki-build-image`](../../loki-build-image/) is the Docker image used to run tests and build Loki binaries in CI. +The [`loki-build-image`](https://github.com/grafana/loki/tree/master/loki-build-image) is the Docker image used to run tests and build Loki binaries in CI. ## How To Perform a Release diff --git a/docs/maintaining/release.md b/docs/sources/maintaining/release.md similarity index 99% rename from docs/maintaining/release.md rename to docs/sources/maintaining/release.md index a7ab59d2bbb6..ab52006909ba 100644 --- a/docs/maintaining/release.md +++ b/docs/sources/maintaining/release.md @@ -1,3 +1,6 @@ +--- +title: Releasing Loki +--- # Releasing Loki This document is a series of instructions for core Loki maintainers to be able diff --git a/docs/sources/operations/_index.md b/docs/sources/operations/_index.md new file mode 100644 index 000000000000..8ad7d0d51848 --- /dev/null +++ b/docs/sources/operations/_index.md @@ -0,0 +1,15 @@ +--- +title: Operations +weight: 800 +--- +# Operating Loki + +1. [Upgrading](upgrade/) +2. [Authentication](authentication/) +3. [Observability](observability/) +4. [Scalability](scalability/) +5. [Storage](storage/) + 1. [Table Manager](storage/table-manager/) + 2. [Retention](storage/retention/) +6. [Multi-tenancy](multi-tenancy/) +7. [Loki Canary](loki-canary/) diff --git a/docs/operations/authentication.md b/docs/sources/operations/authentication.md similarity index 82% rename from docs/operations/authentication.md rename to docs/sources/operations/authentication.md index 1ed847de52c7..eed9125fee11 100644 --- a/docs/operations/authentication.md +++ b/docs/sources/operations/authentication.md @@ -1,3 +1,6 @@ +--- +title: Authentication +--- # Authentication with Loki Loki does not come with any included authentication layer. Operators are @@ -8,7 +11,7 @@ Note that when using Loki in multi-tenant mode, Loki requires the HTTP header `X-Scope-OrgID` to be set to a string identifying the tenant; the responsibility of populating this value should be handled by the authenticating reverse proxy. 
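For illustration only, a reverse proxy such as NGINX could inject the tenant header after authenticating a request; the location fragment below is a sketch, and the tenant value `team-a`, password file path, and upstream address are made-up examples:

```nginx
location / {
    auth_basic           "loki";
    auth_basic_user_file /etc/nginx/passwords;
    # Hypothetical static tenant; a real setup would derive this from the authenticated user.
    proxy_set_header     X-Scope-OrgID "team-a";
    proxy_pass           http://loki:3100;
}
```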
For more information on multi-tenancy please read its -[documentation](multi-tenancy.md). +[documentation](../multi-tenancy/). For information on authenticating Promtail, please see the docs for [how to -configure Promtail](../clients/promtail/configuration.md). +configure Promtail](../../clients/promtail/configuration/). diff --git a/docs/operations/canary.png b/docs/sources/operations/canary.png similarity index 100% rename from docs/operations/canary.png rename to docs/sources/operations/canary.png diff --git a/docs/operations/loki-canary-block.png b/docs/sources/operations/loki-canary-block.png similarity index 100% rename from docs/operations/loki-canary-block.png rename to docs/sources/operations/loki-canary-block.png diff --git a/docs/operations/loki-canary.md b/docs/sources/operations/loki-canary.md similarity index 97% rename from docs/operations/loki-canary.md rename to docs/sources/operations/loki-canary.md index 1b9be3f1d52c..cad19f971bfd 100644 --- a/docs/operations/loki-canary.md +++ b/docs/sources/operations/loki-canary.md @@ -1,13 +1,16 @@ +--- +title: Loki Canary +--- # Loki Canary -![canary](canary.png) +![canary](../canary.png) Loki Canary is a standalone app that audits the log capturing performance of Loki. ## How it works -![block_diagram](loki-canary-block.png) +![block_diagram](../loki-canary-block.png) Loki Canary writes a log to a file and stores the timestamp in an internal array. The contents look something like this: @@ -63,9 +66,9 @@ Every `-spot-check-query-rate`, Loki will be queried for each entry in this list `loki_canary_spot_check_entries_total` will be incremented, if a result is missing `loki_canary_spot_check_missing_entries_total` will be incremented. -The defaults of `15m` for `spot-check-interval` and `4h` for `spot-check-max` +The defaults of `15m` for `spot-check-interval` and `4h` for `spot-check-max` means that after 4 hours of running the canary will have a list of 16 entries -it will query every minute (default `spot-check-query-rate` interval is 1m), +it will query every minute (default `spot-check-query-rate` interval is 1m), so be aware of the query load this can put on Loki if you have a lot of canaries. #### Metric Test @@ -78,11 +81,11 @@ created by the canary. by default every `15m` the canary will run a `count_over_time` instant-query to Loki for a range of `24h`. -If the canary has not run for `-metric-test-range` (`24h`) the query range is adjusted -to the amount of time the canary has been running such that the rate can be calculated +If the canary has not run for `-metric-test-range` (`24h`) the query range is adjusted +to the amount of time the canary has been running such that the rate can be calculated since the canary was started. -The canary calculates what the expected count of logs would be for the range +The canary calculates what the expected count of logs would be for the range (also adjusting this based on canary runtime) and compares the expected result with the actual result returned from Loki. The _difference_ is stored as the value in the gauge `loki_canary_metric_test_deviation` @@ -91,12 +94,12 @@ It's expected that there will be some deviation, the method of creating an expec calculation based on the query rate compared to actual query data is imperfect and will lead to a deviation of a few log entries. -It's not expected for there to be a deviation of more than 3-4 log entries. +It's not expected for there to be a deviation of more than 3-4 log entries. 
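As a sketch, you could alert when the deviation stays high; the threshold of 5 and the 30m window below are arbitrary assumptions to tune for your own tolerance:

```yaml
groups:
  - name: loki-canary
    rules:
      - alert: LokiCanaryHighMetricTestDeviation
        # Assumed threshold; expect a small baseline deviation of a few entries.
        expr: loki_canary_metric_test_deviation > 5
        for: 30m
        annotations:
          summary: Loki Canary metric test deviation is higher than expected.
```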
### Control -Loki Canary responds to two endpoints to allow dynamic suspending/resuming of the -canary process. This can be useful if you'd like to quickly disable or reenable the +Loki Canary responds to two endpoints to allow dynamic suspending/resuming of the +canary process. This can be useful if you'd like to quickly disable or reenable the canary. To stop or start the canary issue an HTTP GET request against the `/suspend` or `/resume` endpoints. diff --git a/docs/operations/multi-tenancy.md b/docs/sources/operations/multi-tenancy.md similarity index 95% rename from docs/operations/multi-tenancy.md rename to docs/sources/operations/multi-tenancy.md index 3366dbaf998e..e7a76486d26c 100644 --- a/docs/operations/multi-tenancy.md +++ b/docs/sources/operations/multi-tenancy.md @@ -1,3 +1,6 @@ +--- +title: Multi-tenancy +--- # Loki Multi-Tenancy Loki is a multi-tenant system; requests and data for tenant A are isolated from diff --git a/docs/operations/observability.md b/docs/sources/operations/observability.md similarity index 97% rename from docs/operations/observability.md rename to docs/sources/operations/observability.md index efaf67fcf39a..b2fc800e326e 100644 --- a/docs/operations/observability.md +++ b/docs/sources/operations/observability.md @@ -1,3 +1,6 @@ +--- +title: Observability +--- # Observing Loki Both Loki and Promtail expose a `/metrics` endpoint that expose Prometheus @@ -74,14 +77,14 @@ Most of these metrics are counters and should continuously increase during norma If Promtail uses any pipelines with metrics stages, those metrics will also be exposed by Promtail at its `/metrics` endpoint. See Promtail's documentation on -[Pipelines](../clients/promtail/pipelines.md) for more information. +[Pipelines](../../clients/promtail/pipelines/) for more information. An example Grafana dashboard was built by the community and is available as dashboard [10004](https://grafana.com/dashboards/10004). ## Mixins -The Loki repository has a [mixin](../../production/loki-mixin) that includes a +The Loki repository has a [mixin](https://github.com/grafana/loki/blob/master/production/loki-mixin) that includes a set of dashboards, recording rules, and alerts. Together, the mixin gives you a comprehensive package for monitoring Loki in production. diff --git a/docs/operations/scalability.md b/docs/sources/operations/scalability.md similarity index 76% rename from docs/operations/scalability.md rename to docs/sources/operations/scalability.md index 47db0bbc4382..7b05d67641cc 100644 --- a/docs/operations/scalability.md +++ b/docs/sources/operations/scalability.md @@ -1,3 +1,6 @@ +--- +title: Scalability +--- # Scaling with Loki See this @@ -6,6 +9,6 @@ on a discussion about Loki's scalability. When scaling Loki, operators should consider running several Loki processes partitioned by role (ingester, distributor, querier) rather than a single Loki -process. Grafana Labs' [production setup](../../production/ksonnet/loki) +process. Grafana Labs' [production setup](https://github.com/grafana/loki/blob/master/production/ksonnet/loki) contains `.libsonnet` files that demonstrates configuring separate components and scaling for resource usage. 
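As a minimal sketch of this approach, each Loki process can be pinned to a single role at startup with the `target` flag; the config file path here is an assumption:

```bash
loki -config.file=/etc/loki/config.yaml -target=distributor
loki -config.file=/etc/loki/config.yaml -target=ingester
loki -config.file=/etc/loki/config.yaml -target=querier
```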
diff --git a/docs/operations/storage/README.md b/docs/sources/operations/storage/_index.md similarity index 91% rename from docs/operations/storage/README.md rename to docs/sources/operations/storage/_index.md index c3d37917bc6a..7986b88ee037 100644 --- a/docs/operations/storage/README.md +++ b/docs/sources/operations/storage/_index.md @@ -1,3 +1,6 @@ +--- +title: Storage +--- # Loki Storage Loki needs to store two different types of data: **chunks** and **indexes**. @@ -10,13 +13,13 @@ format](#chunk-format) for how chunks are stored internally. The **index** stores each stream's label set and links them to the individual chunks. -Refer to Loki's [configuration](../../configuration/README.md) for details on +Refer to Loki's [configuration](../../configuration/) for details on how to configure the storage and the index. For more information: -1. [Table Manager](table-manager.md) -2. [Retention](retention.md) +1. [Table Manager](table-manager/) +2. [Retention](retention/) ## Supported Stores @@ -26,7 +29,7 @@ The following are supported for the index: * [Google Bigtable](https://cloud.google.com/bigtable) * [Apache Cassandra](https://cassandra.apache.org) * [BoltDB](https://github.com/boltdb/bolt) (doesn't work when clustering Loki) -* [BoltDB Shipper](boltdb-shipper.md) EXPERIMENTAL index store which stores boltdb index files in the object store +* [BoltDB Shipper](boltdb-shipper/) EXPERIMENTAL index store which stores boltdb index files in the object store The following are supported for the chunks: @@ -35,7 +38,7 @@ The following are supported for the chunks: * [Apache Cassandra](https://cassandra.apache.org) * [Amazon S3](https://aws.amazon.com/s3) * [Google Cloud Storage](https://cloud.google.com/storage/) -* [Filesystem](filesystem.md) (please read more about the filesystem to understand the pros/cons before using with production data) +* [Filesystem](filesystem/) (please read more about the filesystem to understand the pros/cons before using with production data) ## Cloud Storage Permissions @@ -74,7 +77,7 @@ Resources: `arn:aws:dynamodb:::table/*` Resources: `*` -#### AutoScaling +#### AutoScaling If you enable autoscaling from table manager, the following permissions are needed: diff --git a/docs/operations/storage/boltdb-shipper.md b/docs/sources/operations/storage/boltdb-shipper.md similarity index 99% rename from docs/operations/storage/boltdb-shipper.md rename to docs/sources/operations/storage/boltdb-shipper.md index 56911c179e94..5a0275b6b2d7 100644 --- a/docs/operations/storage/boltdb-shipper.md +++ b/docs/sources/operations/storage/boltdb-shipper.md @@ -1,3 +1,6 @@ +--- +title: BoltDB Shipper +--- # Loki with BoltDB Shipper :warning: BoltDB Shipper is still an experimental feature. It is not recommended to be used in production environments. @@ -30,7 +33,7 @@ storage_config: active_index_directory: /loki/index shared_store: gcs cache_location: /loki/boltdb-cache -``` +``` This would run Loki with BoltDB Shipper storing BoltDB files locally at `/loki/index` and chunks at configured `GCS_BUCKET_NAME`. It would also keep shipping BoltDB files periodically to same configured bucket. 
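A matching `schema_config` period entry might look like the sketch below; the `from` date is illustrative, and BoltDB Shipper is typically configured with a 24h index period:

```yaml
schema_config:
  configs:
    - from: 2020-07-01
      store: boltdb-shipper
      object_store: gcs
      schema: v11
      index:
        prefix: loki_index_
        period: 24h
```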
diff --git a/docs/operations/storage/filesystem.md b/docs/sources/operations/storage/filesystem.md similarity index 93% rename from docs/operations/storage/filesystem.md rename to docs/sources/operations/storage/filesystem.md index 7a161ef25692..f747a819c927 100644 --- a/docs/operations/storage/filesystem.md +++ b/docs/sources/operations/storage/filesystem.md @@ -1,3 +1,6 @@ +--- +title: Filesystem +--- # Filesystem Object Store The filesystem object store is the easiest to get started with Loki but there are some pros/cons to this approach. @@ -14,7 +17,7 @@ A folder is created for every tenant all the chunks for one tenant are stored in If loki is run in single-tenant mode, all the chunks are put in a folder named `fake` which is the synthesized tenant name used for single tenant mode. -See [multi-tenancy](../multi-tenancy.md) for more information. +See [multi-tenancy](../../multi-tenancy/) for more information. ## Pros @@ -44,7 +47,7 @@ Running Loki clustered is not possible with the filesystem store unless the file **WARNING** as the title suggests, this is very new and potentially buggy, and it is also very likely configs around this feature will change over time. -With that warning out of the way, the addition of the [boltdb-shipper](boltdb-shipper.md) index store has added capabilities making it possible to overcome many of the limitations listed above using the filesystem store, specifically running Loki with the filesystem store on separate machines but still operate as a cluster supporting replication, and write distribution via the hash ring. +With that warning out of the way, the addition of the [boltdb-shipper](../boltdb-shipper/) index store has added capabilities making it possible to overcome many of the limitations listed above using the filesystem store, specifically running Loki with the filesystem store on separate machines but still operate as a cluster supporting replication, and write distribution via the hash ring. As mentioned in the title, this is very alpha at this point but we would love for people to try this and help us flush out bugs. @@ -66,7 +69,7 @@ ingester: max_chunk_age: 2h # Let chunks get at least 2h old before flushing due to age, this helps to reduce total chunks in store chunk_target_size: 1048576 # Target chunks of 1MB, this helps to reduce total chunks in store chunk_retain_period: 30s - + query_store_max_look_back_period: -1 # This will allow the ingesters to query the store for all data lifecycler: heartbeat_period: 5s diff --git a/docs/operations/storage/retention.md b/docs/sources/operations/storage/retention.md similarity index 80% rename from docs/operations/storage/retention.md rename to docs/sources/operations/storage/retention.md index 95d691896f88..d2b906827aa4 100644 --- a/docs/operations/storage/retention.md +++ b/docs/sources/operations/storage/retention.md @@ -1,9 +1,12 @@ +--- +title: Retention +--- # Loki Storage Retention -Retention in Loki is achieved through the [Table Manager](./table-manager.md). +Retention in Loki is achieved through the [Table Manager](../table-manager/). In order to enable the retention support, the Table Manager needs to be configured to enable deletions and a retention period. Please refer to the -[`table_manager_config`](../../configuration/README.md#table_manager_config) +[`table_manager_config`](../../../configuration#table_manager_config) section of the Loki configuration reference for all available options. 
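For example, a minimal sketch of that configuration block could look like this (the 28-day retention period is an arbitrary choice):

```yaml
table_manager:
  retention_deletes_enabled: true
  # Must be a multiple of the index/chunks table period; 672h is 28 days.
  retention_period: 672h
```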
Alternatively, the `table-manager.retention-period` and `table-manager.retention-deletes-enabled` command line flags can be used. The @@ -11,12 +14,12 @@ provided retention period needs to be a duration represented as a string that can be parsed using Go's [time.Duration](https://golang.org/pkg/time/#ParseDuration). > **WARNING**: The retention period must be a multiple of the index and chunks table -`period`, configured in the [`period_config`](../../configuration/README.md#period_config) -block. See the [Table Manager](./table-manager.md#retention) documentation for +`period`, configured in the [`period_config`](../../../configuration#period_config) +block. See the [Table Manager](../table-manager#retention) documentation for more information. > **NOTE**: To avoid querying of data beyond the retention period, -`max_look_back_period` config in [`chunk_store_config`](../../configuration/README.md#chunk_store_config) must be set to a value less than or equal to +`max_look_back_period` config in [`chunk_store_config`](../../../configuration#chunk_store_config) must be set to a value less than or equal to what is set in `table_manager.retention_period`. When using S3 or GCS, the bucket storing the chunks needs to have the expiry @@ -36,7 +39,7 @@ intact; you will still be able to see related labels but will be unable to retrieve the deleted log content. For further details on the Table Manager internals, refer to the -[Table Manager](./table-manager.md) documentation. +[Table Manager](../table-manager/) documentation. ## Example Configuration diff --git a/docs/operations/storage/table-manager-active-vs-inactive-tables.png b/docs/sources/operations/storage/table-manager-active-vs-inactive-tables.png similarity index 100% rename from docs/operations/storage/table-manager-active-vs-inactive-tables.png rename to docs/sources/operations/storage/table-manager-active-vs-inactive-tables.png diff --git a/docs/operations/storage/table-manager-periodic-tables.png b/docs/sources/operations/storage/table-manager-periodic-tables.png similarity index 100% rename from docs/operations/storage/table-manager-periodic-tables.png rename to docs/sources/operations/storage/table-manager-periodic-tables.png diff --git a/docs/operations/storage/table-manager-retention.png b/docs/sources/operations/storage/table-manager-retention.png similarity index 100% rename from docs/operations/storage/table-manager-retention.png rename to docs/sources/operations/storage/table-manager-retention.png diff --git a/docs/operations/storage/table-manager.md b/docs/sources/operations/storage/table-manager.md similarity index 89% rename from docs/operations/storage/table-manager.md rename to docs/sources/operations/storage/table-manager.md index 1557c6c976a5..08d8603370d5 100644 --- a/docs/operations/storage/table-manager.md +++ b/docs/sources/operations/storage/table-manager.md @@ -1,3 +1,6 @@ +--- +title: Table manager +--- # Table Manager Loki supports storing indexes and chunks in table-based data storages. When @@ -34,7 +37,7 @@ to store chunks, are not managed by the Table Manager, and a custom bucket polic should be set to delete old data. For detailed information on configuring the Table Manager, refer to the -[`table_manager`](../../configuration/README.md#table_manager_config) +[`table_manager`](../../../configuration#table_manager_config) section in the Loki configuration document. @@ -43,10 +46,10 @@ section in the Loki configuration document. 
A periodic table stores the index or chunk data relative to a specific period of time. The duration of the time range of the data stored in a single table and its storage type is configured in the -[`schema_config`](../../configuration/README.md#schema_config) configuration +[`schema_config`](../../../configuration#schema_config) configuration block. -The [`schema_config`](../../configuration/README.md#schema_config) can contain +The [`schema_config`](../../../configuration#schema_config) can contain one or more `configs`. Each config, defines the storage used between the day set in `from` (in the format `yyyy-mm-dd`) and the next config, or "now" in the case of the last schema config entry. @@ -55,7 +58,7 @@ This allows to have multiple non-overlapping schema configs over the time, in order to perform schema version upgrades or change storage settings (including changing the storage type). -![periodic_tables](./table-manager-periodic-tables.png) +![periodic_tables](../table-manager-periodic-tables.png) The write path hits the table where the log entry timestamp falls into (usually the last table, except short periods close to the end of a table and the @@ -100,7 +103,7 @@ order to make sure that the new table is ready once the current table end period is reached. The `creation_grace_period` property - in the -[`table_manager`](../../configuration/README.md#table_manager_config) +[`table_manager`](../../../configuration#table_manager_config) configuration block - defines how long before a table should be created. @@ -128,14 +131,14 @@ is deleted, the Table Manager keeps the last tables alive using this formula: number_of_tables_to_keep = floor(retention_period / table_period) + 1 ``` -![retention](./table-manager-retention.png) +![retention](../table-manager-retention.png) It's important to note that - due to the internal implementation - the table `period` and `retention_period` **must** be multiples of `24h` in order to get the expected behavior. For detailed information on configuring the retention, refer to the -[Loki Storage Retention](./retention.md) +[Loki Storage Retention](../retention/) documentation. @@ -144,10 +147,10 @@ documentation. A table can be active or inactive. A table is considered **active** if the current time is within the range: -- Table start period - [`creation_grace_period`](../../configuration/README.md#table_manager_config) +- Table start period - [`creation_grace_period`](../../../configuration#table_manager_config) - Table end period + max chunk age (hardcoded to `12h`) -![active_vs_inactive_tables](./table-manager-active-vs-inactive-tables.png) +![active_vs_inactive_tables](../table-manager-active-vs-inactive-tables.png) Currently, the difference between an active and inactive table **only applies to the DynamoDB storage** settings: capacity mode (on-demand or provisioned), @@ -195,14 +198,14 @@ The Table Manager can be executed in two ways: ### Monolithic mode -When Loki runs in [monolithic mode](../../architecture.md#modes-of-operation), +When Loki runs in [monolithic mode](../../../architecture#modes-of-operation), the Table Manager is also started as component of the entire stack. ### Microservices mode -When Loki runs in [microservices mode](../../architecture.md#modes-of-operation), +When Loki runs in [microservices mode](../../../architecture#modes-of-operation), the Table Manager should be started as separate service named `table-manager`. 
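As an illustrative sketch (the config file path is an assumption), that means running a dedicated process such as:

```bash
loki -config.file=/etc/loki/config.yaml -target=table-manager
```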
You can check out a production grade deployment example at -[`table-manager.libsonnet`](../../../production/ksonnet/loki/table-manager.libsonnet). +[`table-manager.libsonnet`](https://github.com/grafana/loki/tree/master/production/ksonnet/loki/table-manager.libsonnet). diff --git a/docs/operations/upgrade.md b/docs/sources/operations/upgrade.md similarity index 97% rename from docs/operations/upgrade.md rename to docs/sources/operations/upgrade.md index 2e4ee23efa70..7e7c856ff89b 100644 --- a/docs/operations/upgrade.md +++ b/docs/sources/operations/upgrade.md @@ -1,3 +1,6 @@ +--- +title: Upgrade +--- # Upgrading Loki Every attempt is made to keep Loki backwards compatible, such that upgrades should be low risk and low friction. @@ -10,9 +13,9 @@ On this page we will document any upgrade issues/gotchas/considerations we are a ## Master / Unreleased Configuration document has been re-orderd a bit and for all the config, corresponding `CLI` flag is -provided. +provided. -S3 config now supports exapnded config. Example can be found here [s3_expanded_config](../configuration/examples.md#s3-expanded-config) +S3 config now supports exapnded config. Example can be found here [s3_expanded_config](../../configuration/examples#s3-expanded-config) ### Breaking CLI flags changes @@ -130,7 +133,7 @@ exit docker run -d --name=loki --mount source=loki-data,target=/loki -p 3100:3100 grafana/loki:1.5.0 ``` -Notice the change in the `target=/loki` for 1.5.0 to the new data directory location specified in the [included Loki config file](../../cmd/loki/loki-docker-config.yaml). +Notice the change in the `target=/loki` for 1.5.0 to the new data directory location specified in the [included Loki config file](https://github.com/grafana/loki/tree/master/cmd/loki/loki-docker-config.yaml). The intermediate step of using an ubuntu image to change the ownership of the Loki files to the new user might not be necessary if you can easily access these files to run the `chown` command directly. That is if you have access to `/var/lib/docker/volumes` or if you mounted to a different local filesystem directory, you can change the ownership directly without using a container. @@ -185,7 +188,7 @@ Loki 1.4.0 vendors Cortex v0.7.0-rc.0 which contains [several breaking config ch One such config change which will affect Loki users: -In the [cache_config](../configuration/README.md#cache_config): +In the [cache_config](../../configuration#cache_config): `defaul_validity` has changed to `default_validity` diff --git a/docs/overview/README.md b/docs/sources/overview/_index.md similarity index 97% rename from docs/overview/README.md rename to docs/sources/overview/_index.md index fd81b05b69bf..c73c1bffd685 100644 --- a/docs/overview/README.md +++ b/docs/sources/overview/_index.md @@ -1,3 +1,7 @@ +--- +title: Overview +weight: 100 +--- # Overview of Loki Grafana Loki is a set of components that can be composed into a fully featured @@ -8,7 +12,7 @@ labels for logs and leaving the original log message unindexed. This means that Loki is cheaper to operate and can be orders of magnitude more efficient. For a more detailed version of this same document, please read -[Architecture](../architecture.md). +[Architecture](../architecture/). ## Multi Tenancy @@ -31,7 +35,7 @@ to scale independently of each other. ### Distributor The **distributor** service is responsible for handling logs written by -[clients](../clients/README.md). It's essentially the "first stop" in the write +[clients](../clients/). 
It's essentially the "first stop" in the write path for log data. Once the distributor receives log data, it splits them into batches and sends them to multiple [ingesters](#ingester) in parallel. @@ -119,7 +123,7 @@ given time. ### Querier -The **querier** service handles the actual [LogQL](../logql.md) evaluation of +The **querier** service handles the actual [LogQL](../logql/) evaluation of logs stored in long-term storage. It first tries to query all ingesters for in-memory data before falling back to diff --git a/docs/overview/comparisons.md b/docs/sources/overview/comparisons.md similarity index 98% rename from docs/overview/comparisons.md rename to docs/sources/overview/comparisons.md index b030df7c34cd..74bd2305f00e 100644 --- a/docs/overview/comparisons.md +++ b/docs/sources/overview/comparisons.md @@ -1,3 +1,6 @@ +--- +title: Comparisons +--- # Loki compared to other log systems ## Loki / Promtail / Grafana vs EFK @@ -15,7 +18,7 @@ horizontally-scalable mode data is stored in a cloud storage system such as S3, GCS, or Cassandra. Logs are stored in plaintext form tagged with a set of label names and values, where only the label pairs are indexed. This tradeoff makes it cheaper to operate than a full index and allows developers to aggressively log -from their applications. Logs in Loki are queried using [LogQL](../logql.md). +from their applications. Logs in Loki are queried using [LogQL](../../logql). However, because of this design tradeoff, LogQL queries that filter based on content (i.e., text within the log lines) require loading all chunks within the search window that match the labels defined in the query. From 7257b1d04c06e19641ec8e95278ed730b6d4bbad Mon Sep 17 00:00:00 2001 From: Robby Milo Date: Mon, 20 Jul 2020 12:35:28 +0200 Subject: [PATCH 2/5] add workflow --- .github/workflows/publish.yml | 38 +++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 .github/workflows/publish.yml diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 000000000000..74d5d2986f60 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,38 @@ +name: publish_docs + +on: + push: + branches: + - master + paths: + - 'docs/sources/**' + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v1 + - name: Build Website + run: | + docker run -v ${PWD}/docs/sources:/hugo/content/docs/loki/latest --rm grafana/docs-base:latest /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make prod' + sync: + runs-on: ubuntu-latest + needs: test + steps: + - uses: actions/checkout@v1 + - run: git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.GH_BOT_ACCESS_TOKEN }}@github.com/grafana/website-sync ./.github/actions/website-sync + - name: publish-to-git + uses: ./.github/actions/website-sync + id: publish + with: + repository: grafana/website + branch: master + host: github.com + github_pat: '${{ secrets.GH_BOT_ACCESS_TOKEN }}' + source_folder: docs/sources + target_folder: content/docs/loki/latest + - shell: bash + run: | + test -n "${{ steps.publish.outputs.commit_hash }}" + test -n "${{ steps.publish.outputs.working_directory }}" \ No newline at end of file From 73298357119ed6f91b24753c0daeccdffc14de7c Mon Sep 17 00:00:00 2001 From: Robby Milo Date: Mon, 20 Jul 2020 13:14:36 +0200 Subject: [PATCH 3/5] update docs content --- docs/.DS_Store | Bin 0 -> 6148 bytes docs/sources/clients/logstash/_index.md 
| 270 ++++++++++++++++++++++
 docs/sources/configuration/_index.md | 2 +-
 docs/sources/installation/helm.md | 6 +
 docs/sources/operations/storage/_index.md | 2 +-
 docs/sources/operations/upgrade.md | 25 +-
 6 files changed, 299 insertions(+), 6 deletions(-)
 create mode 100644 docs/.DS_Store
 create mode 100644 docs/sources/clients/logstash/_index.md

diff --git a/docs/.DS_Store b/docs/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..d7c3df38845b09a28a5ee8a4c72c1b93ae7900af
GIT binary patch
literal 6148
zcmeHKOG*SW5UuEd4Q^)XGFO-z#36$Fxq$Sv=wgN*9A@8V^A_H~t>OWEuc{PVd*MbA
zseDu&Sq1Hr(UfnyuadH-MFmzgZ`
z$00Ea27-Zq#z30YRbAqv{BHg7dAw^g+7%jweghQ<^wA>#1D+#iYBYPM4!`0!7%~gz
RH5?cZ0VO25VBi-Rcm?QJGR*)0

literal 0
HcmV?d00001

diff --git a/docs/sources/clients/logstash/_index.md b/docs/sources/clients/logstash/_index.md
new file mode 100644
index 000000000000..d131998b1579
--- /dev/null
+++ b/docs/sources/clients/logstash/_index.md
@@ -0,0 +1,270 @@
+@@ -0,0 +1,269 @@
+# Logstash
+
+Loki has a [Logstash](https://www.elastic.co/logstash) output plugin called
+`logstash-output-loki` that enables shipping logs to a Loki
+instance or [Grafana Cloud](https://grafana.com/products/cloud/).
+
+## Installation
+
+### Local
+
+If you need to install the Loki output plugin manually you can simply do so by using the command below:
+
+```bash
+$ bin/logstash-plugin install logstash-output-loki
+```
+
+This will download the latest gem for the output plugin and install it in logstash.
+
+### Docker
+
+We also provide a docker image on [docker hub](https://hub.docker.com/r/grafana/logstash-output-loki). The image contains logstash and the Loki output plugin
+already pre-installed.
+
+For example if you want to run logstash in docker with the `loki.conf` as pipeline configuration you can use the command below:
+
+```bash
+docker run -v `pwd`/loki-test.conf:/home/logstash/ --rm grafana/logstash-output-loki:1.0.1 -f loki-test.conf
+```
+
+### Kubernetes
+
+We also provide default Helm values for scraping logs with Filebeat and forwarding them to Loki with logstash in our `loki-stack` umbrella chart.
+You can switch from Promtail to logstash by using the following command:
+
+```bash
+helm upgrade --install loki loki/loki-stack \
+    --set filebeat.enabled=true,logstash.enabled=true,promtail.enabled=false
+```
+
+This will automatically scrape all pod logs in the cluster and send them to Loki with Kubernetes metadata attached as labels.
+You can use the [`values.yaml`](../../../production/helm/loki-stack/values.yaml) file as a starting point for your own configuration.
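+As a quick, hypothetical smoke test of the plugin, you can also run Logstash with an inline pipeline that reads from stdin; the URL below is a placeholder for your own Loki instance:
+
+```bash
+bin/logstash -e 'input { stdin {} } output { loki { url => "http://localhost:3100/loki/api/v1/push" } }'
+```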
+
+## Usage and Configuration
+
+To configure Logstash to forward logs to Loki, simply add the `loki` output to your [Logstash configuration file](https://www.elastic.co/guide/en/logstash/current/configuration-file-structure.html) as documented below:
+
+```conf
+output {
+  loki {
+    [url => "" | default = none | required=true]
+
+    [tenant_id => string | default = nil | required=false]
+
+    [message_field => string | default = "message" | required=false]
+
+    [batch_wait => number | default = 1(s) | required=false]
+
+    [batch_size => number | default = 102400(bytes) | required=false]
+
+    [min_delay => number | default = 1(s) | required=false]
+
+    [max_delay => number | default = 300(s) | required=false]
+
+    [retries => number | default = 10 | required=false]
+
+    [username => string | default = nil | required=false]
+
+    [password => secret | default = nil | required=false]
+
+    [cert => path | default = nil | required=false]
+
+    [key => path | default = nil| required=false]
+
+    [ca_cert => path | default = nil | required=false]
+
+  }
+}
+```
+
+By default Loki will create an entry from the event fields it receives.
+A Logstash event is shown below.
+
+```conf
+{
+  "@timestamp" => 2017-04-26T19:33:39.257Z,
+  "src" => "localhost",
+  "@version" => "1",
+  "host" => "localhost.localdomain",
+  "pid" => "1",
+  "message" => "Apr 26 12:20:02 localhost systemd[1]: Starting system activity accounting tool...",
+  "type" => "stdin",
+  "prog" => "systemd",
+}
+```
+
+It contains `message` and `@timestamp` fields, which are used to form the Loki entry's log line and timestamp respectively.
+
+> You can use a different property for the log line by using the configuration property [`message_field`](#message_field). If you also need to change the timestamp value use the Logstash `date` filter to change the `@timestamp` field.
+
+All other fields (except nested fields) will form the label set (key value pairs) attached to the log line. [This means you're responsible for mutating and dropping high cardinality labels](https://grafana.com/blog/2020/04/21/how-labels-in-loki-can-make-log-queries-faster-and-easier/) such as client IPs.
+You can usually do so by using a [`mutate`](https://www.elastic.co/guide/en/logstash/current/plugins-filters-mutate.html) filter.
+
+For example, the configuration below:
+
+```conf
+input {
+  ...
+}
+
+filter {
+  mutate {
+    add_field => {
+      "cluster" => "us-central1"
+      "job" => "logstash"
+    }
+    replace => { "type" => "stream"}
+    remove_field => ["src"]
+  }
+}
+output {
+  loki {
+    url => "http://myloki.domain:3100/loki/api/v1/push"
+  }
+}
+```
+
+will add the static labels `cluster` and `job`, remove the `src` field, and replace the value of `type` with `stream`.
+
+If you want to include nested fields or metadata fields (starting with `@`) you need to rename them.
+
+For example when using Filebeat with the [`add_kubernetes_metadata`](https://www.elastic.co/guide/en/beats/filebeat/current/add-kubernetes-metadata.html) processor, it will attach Kubernetes metadata to your events as shown below:
+
+```json
+{
+    "kubernetes" : {
+        "labels" : {
+            "app" : "MY-APP",
+            "pod-template-hash" : "959f54cd",
+            "serving" : "true",
+            "version" : "1.0",
+            "visualize" : "true"
+        },
+        "pod" : {
+            "uid" : "e20173cb-3c5f-11ea-836e-02c1ee65b375",
+            "name" : "MY-APP-959f54cd-lhd5p"
+        },
+        "node" : {
+            "name" : "ip-xxx-xx-xx-xxx.ec2.internal"
+        },
+        "container" : {
+            "name" : "istio"
+        },
+        "namespace" : "production",
+        "replicaset" : {
+            "name" : "MY-APP-959f54cd"
+        }
+    },
+    "message": "Failed to parse configuration",
+    "@timestamp": "2017-04-26T19:33:39.257Z",
+}
+```
+
+The filter below shows you how to extract those Kubernetes fields into labels (`container_name`, `namespace`, `pod` and `host`):
+
+```conf
+filter {
+  if [kubernetes] {
+    mutate {
+      add_field => {
+        "container_name" => "%{[kubernetes][container][name]}"
+        "namespace" => "%{[kubernetes][namespace]}"
+        "pod" => "%{[kubernetes][pod][name]}"
+      }
+      replace => { "host" => "%{[kubernetes][node][name]}"}
+    }
+  }
+  mutate {
+    remove_field => ["tags"]
+  }
+}
+```
+
+### Configuration Properties
+
+#### url
+
+The URL of the Loki server to send logs to.
+When sending data the push path needs to also be provided, e.g. `http://localhost:3100/loki/api/v1/push`.
+
+If you want to send to [GrafanaCloud](https://grafana.com/products/cloud/) you would use `https://logs-prod-us-central1.grafana.net/loki/api/v1/push`.
+
+#### username / password
+
+Specify a username and password if the Loki server requires basic authentication.
+If using the [Grafana Labs' hosted Loki](https://grafana.com/products/cloud/), the username needs to be set to your instance/user ID and the password should be a Grafana.com API key.
+
+#### message_field
+
+Message field to use for log lines. You can use the Logstash key accessor language to grab a nested property, for example: `[log][message]`.
+
+#### batch_wait
+
+Interval in seconds to wait before pushing a batch of records to Loki. This means even if the [batch size](#batch_size) is not reached after `batch_wait`, a partial batch will be sent; this is to ensure freshness of the data.
+
+#### batch_size
+
+Maximum batch size to accrue before pushing to Loki. Defaults to 102400 bytes.
+
+#### Backoff config
+
+##### min_delay => 1(1s)
+
+Initial backoff time between retries.
+
+##### max_delay => 300(5m)
+
+Maximum backoff time between retries.
+
+##### retries => 10
+
+Maximum number of retries to perform.
+
+#### tenant_id
+
+Loki is a multi-tenant log storage platform and all requests sent must include a tenant. For some installations the tenant will be set automatically by an authenticating proxy. Otherwise you can define a tenant to be passed through. The tenant can be any string value.
+
+#### client certificate verification
+
+Specify a pair of client certificate and private key with `cert` and `key` if a reverse proxy with client certificate verification is configured in front of Loki. `ca_cert` can also be specified if the server uses a custom certificate authority.
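+For illustration, a sketch of such an output block could look like the following; all file paths and the URL are placeholders:
+
+```conf
+output {
+  loki {
+    url => "https://loki.example.com/loki/api/v1/push"
+    cert => "/etc/logstash/certs/client.crt"
+    key => "/etc/logstash/certs/client.key"
+    ca_cert => "/etc/logstash/certs/ca.crt"
+  }
+}
+```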
+
+### Full configuration example
+
+```conf
+input {
+  beats {
+    port => 5044
+  }
+}
+
+filter {
+  if [kubernetes] {
+    mutate {
+      add_field => {
+        "container_name" => "%{[kubernetes][container][name]}"
+        "namespace" => "%{[kubernetes][namespace]}"
+        "pod" => "%{[kubernetes][pod][name]}"
+      }
+      replace => { "host" => "%{[kubernetes][node][name]}" }
+    }
+  }
+  mutate {
+    remove_field => ["tags"]
+  }
+}
+
+output {
+  loki {
+    url => "https://logs-prod-us-central1.grafana.net/loki/api/v1/push"
+    username => "3241"
+    password => "REDACTED"
+    batch_size => 112640 # 112.64 kilobytes
+    retries => 5
+    min_delay => 3
+    max_delay => 500
+    message_field => "message"
+  }
+  # stdout { codec => rubydebug }
+}
+```
\ No newline at end of file
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index e3225ed682f0..72b72d1cfb25 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -829,7 +829,7 @@ gcs:
   # CLI flag: -gcs.request-timeout
   [request_timeout: <duration> | default = 0s]
 
-# Configures storing chunks in Cassandra
+# Configures storing chunks and/or the index in Cassandra
 cassandra:
   # Comma-separated hostnames or IPs of Cassandra instances
   # CLI flag: -cassandra.addresses
diff --git a/docs/sources/installation/helm.md b/docs/sources/installation/helm.md
index e90fc0f78caa..a0ec817d89ee 100644
--- a/docs/sources/installation/helm.md
+++ b/docs/sources/installation/helm.md
@@ -46,6 +46,12 @@ helm upgrade --install loki loki/loki --set "key1=val1,key2=val2,..."
 helm upgrade --install loki loki/loki-stack --set grafana.enabled=true,prometheus.enabled=true,prometheus.alertmanager.persistentVolume.enabled=false,prometheus.server.persistentVolume.enabled=false
 ```
 
+### Deploy Loki Stack (Loki, Promtail, Grafana, Prometheus) with persistent volume claim
+
+```bash
+helm upgrade --install loki loki/loki-stack --set grafana.enabled=true,prometheus.enabled=true,prometheus.alertmanager.persistentVolume.enabled=false,prometheus.server.persistentVolume.enabled=false,loki.persistence.enabled=true,loki.persistence.storageClassName=standard,loki.persistence.size=5Gi
+```
+
 ### Deploy Loki Stack (Loki, Fluent Bit, Grafana, Prometheus)
 
 ```bash
diff --git a/docs/sources/operations/storage/_index.md b/docs/sources/operations/storage/_index.md
index 7986b88ee037..3f09c5b01d63 100644
--- a/docs/sources/operations/storage/_index.md
+++ b/docs/sources/operations/storage/_index.md
@@ -7,7 +7,7 @@ Loki needs to store two different types of data: **chunks** and **indexes**.
 
 Loki receives logs in separate streams, where each stream is uniquely identified
 by its tenant ID and its set of labels. As log entries from a stream arrive,
-they are GZipped as "chunks" and saved in the chunks store. See [chunk
+they are compressed as "chunks" and saved in the chunks store. See [chunk
 format](#chunk-format) for how chunks are stored internally.
 
 The **index** stores each stream's label set and links them to the individual
diff --git a/docs/sources/operations/upgrade.md b/docs/sources/operations/upgrade.md
index 7e7c856ff89b..d7cca1108a2e 100644
--- a/docs/sources/operations/upgrade.md
+++ b/docs/sources/operations/upgrade.md
@@ -15,7 +15,16 @@ On this page we will document any upgrade issues/gotchas/considerations we are a
 
 Configuration document has been re-orderd a bit and for all the config, corresponding `CLI` flag is provided.
 
-S3 config now supports exapnded config. Example can be found here [s3_expanded_config](../../configuration/examples#s3-expanded-config)
+S3 config now supports expanded config. An example can be found here: [s3_expanded_config](../configuration/examples.md#s3-expanded-config)
+
+### New Ingester GRPC API special rollout procedure in microservices mode
+
+A new ingester gRPC API has been added to speed up metric queries. To ensure a rollout without query errors, make sure you upgrade all ingesters first.
+Once this is done, you can proceed with the rest of the deployment; this ensures that queriers won't look for an API that is not yet available.
+
+If you roll out everything at once, queriers with this new code will attempt to query ingesters which may not have the new method on the API, and those queries will fail.
+
+This will only affect reads (queries), not writes, and only for the duration of the rollout.
 
 ### Breaking CLI flags changes
 
@@ -33,11 +42,19 @@ S3 config now supports exapnded config. Example can be found here [s3_expanded_c
 + ingester.concurrent-flushes
 ```
 
-## 1.6.0
+### Loki Canary metric name changes
 
-A new ingester GRPC API has been added allowing to speed up metric queries, to ensure a rollout without query errors make sure you upgrade all ingesters first.
-Once this is done you can then proceed with the rest of the deployment, this is to ensure that queriers won't look for an API not yet available.
+When adding some new features to the canary, we realized the existing metrics were not compliant with the Prometheus naming standards for counters, so the following metrics have been renamed:
 
+```nohighlight
+loki_canary_total_entries             -> loki_canary_entries_total
+loki_canary_out_of_order_entries      -> loki_canary_out_of_order_entries_total
+loki_canary_websocket_missing_entries -> loki_canary_websocket_missing_entries_total
+loki_canary_missing_entries           -> loki_canary_missing_entries_total
+loki_canary_unexpected_entries        -> loki_canary_unexpected_entries_total
+loki_canary_duplicate_entries         -> loki_canary_duplicate_entries_total
+loki_canary_ws_reconnects             -> loki_canary_ws_reconnects_total
+```
 
 ## 1.5.0

From 09fc207277b6d99071c663e88645f789c6ccf175 Mon Sep 17 00:00:00 2001
From: Robby Milo
Date: Mon, 20 Jul 2020 13:21:18 +0200
Subject: [PATCH 4/5] update content

---
 docs/sources/storage/_index.md | 303 +++++++++++++++++++++++++++++++++
 1 file changed, 303 insertions(+)
 create mode 100644 docs/sources/storage/_index.md

diff --git a/docs/sources/storage/_index.md b/docs/sources/storage/_index.md
new file mode 100644
index 000000000000..c42e555a8c75
--- /dev/null
+++ b/docs/sources/storage/_index.md
@@ -0,0 +1,303 @@
+---
+title: Storage
+weight: 1010
+---
+# Storage
+
+Loki uses a two-pronged strategy regarding storage, which is responsible for both its limitations and its advantages. The main idea is that logs are large and traditional indexing strategies are prohibitively expensive and complex to run at scale. This often brings along ancillary operational costs in the form of schema design, index management/rotation, backup/restore protocols, etc. Instead, Loki stores all of its log content unindexed in object storage. It then uses the Prometheus label paradigm along with a small but specialized index store to allow lookup, matching, and filtering based on these labels. When a set of unique key/value label pairs is combined with its logs, we call the result a _log stream_, which is generally analogous to a log file on disk. It may have labels like `{app="api", env="production", filename="/var/logs/app.log"}`, which together uniquely identify it. The object storage is responsible for storing the compressed logs cheaply, while the index takes care of storing these labels in a way that enables fast, effective querying.
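+
+For example, a LogQL stream selector (sketched here purely as an illustration of the labels above; it is not part of the original examples in this document) matches such a stream by its labels:
+
+```logql
+{app="api", env="production"}
+```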
+
+- [Storage](#storage)
+  - [Implementations - Chunks](#implementations---chunks)
+    - [Cassandra](#cassandra)
+    - [GCS](#gcs)
+    - [File System](#file-system)
+    - [S3](#s3)
+    - [Notable Mentions](#notable-mentions)
+  - [Implementations - Index](#implementations---index)
+    - [Cassandra](#cassandra-1)
+    - [BigTable](#bigtable)
+    - [DynamoDB](#dynamodb)
+      - [Rate Limiting](#rate-limiting)
+    - [BoltDB](#boltdb)
+  - [Period Configs](#period-configs)
+  - [Table Manager](#table-manager)
+    - [Provisioning](#provisioning)
+  - [Upgrading Schemas](#upgrading-schemas)
+  - [Retention](#retention)
+  - [Examples](#examples)
+    - [Single machine/local development (boltdb+filesystem)](#single-machinelocal-development-boltdbfilesystem)
+    - [GCP deployment (GCS+BigTable)](#gcp-deployment-gcsbigtable)
+    - [AWS deployment (S3+DynamoDB)](#aws-deployment-s3dynamodb)
+    - [On prem deployment (Cassandra+Cassandra)](#on-prem-deployment-cassandracassandra)
+    - [On prem deployment (Cassandra+MinIO)](#on-prem-deployment-cassandraminio)
+
+## Implementations - Chunks
+
+### Cassandra
+
+Cassandra is a popular database and one of Loki's possible chunk stores; it is production safe.
+
+### GCS
+
+GCS is a hosted object store offered by Google. It is a good candidate for a managed object store, especially when you're already running on GCP, and is production safe.
+
+### File System
+
+The file system is the simplest backend for chunks, although it's also susceptible to data loss because it's unreplicated. It is common for single-binary deployments, though, as well as for those trying out Loki or doing local development on the project. It is similar in concept to many Prometheus deployments where a single Prometheus is responsible for monitoring a fleet.
+
+### S3
+
+S3 is AWS's hosted object store. It is a good candidate for a managed object store, especially when you're already running on AWS, and is production safe.
+
+### Notable Mentions
+
+You may use any substitutable service, such as those that implement the S3 API, like [MinIO](https://min.io/).
+
+## Implementations - Index
+
+### Cassandra
+
+Cassandra can also be utilized for the index store and, aside from the experimental [boltdb-shipper](../operations/storage/boltdb-shipper.md), it's the only non-cloud offering for the index that's horizontally scalable and has configurable replication. It's a good candidate when you already run Cassandra, are running on-prem, or do not wish to use a managed cloud offering.
+
+### BigTable
+
+Bigtable is a cloud database offered by Google. It is a good candidate for a managed index store if you're already using it (due to its heavy fixed costs) or wish to run in GCP.
+
+### DynamoDB
+
+DynamoDB is a cloud database offered by AWS. It is a good candidate for a managed index store, especially if you're already running in AWS.
+
+#### Rate Limiting
+
+DynamoDB is susceptible to rate limiting, particularly due to overconsuming what is called [provisioned capacity](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html). This can be controlled via the [provisioning](#provisioning) configs in the table manager.
+
+### BoltDB
+
+BoltDB is an embedded database on disk. It is not replicated and thus cannot be used for high availability or clustered Loki deployments, but it is commonly paired with a `filesystem` chunk store for proof-of-concept deployments, trying out Loki, and development. There is also an experimental mode, the [boltdb-shipper](../operations/storage/boltdb-shipper.md), which aims to support clustered deployments using `boltdb` as an index.
+
+## Period Configs
+
+Loki aims to be backwards compatible and, over the course of its development, has had many internal changes that facilitate better and more efficient storage/querying. Loki allows incrementally upgrading to these new storage _schemas_ and can query across them transparently. This makes upgrading a breeze. For instance, this is what it looks like when migrating from the v10 -> v11 schemas starting 2020-07-01:
+
+```yaml
+schema_config:
+  configs:
+    - from: 2019-07-01
+      store: boltdb
+      object_store: filesystem
+      schema: v10
+      index:
+        prefix: index_
+        period: 168h
+    - from: 2020-07-01
+      store: boltdb
+      object_store: filesystem
+      schema: v11
+      index:
+        prefix: index_
+        period: 168h
+```
+
+For all data ingested before 2020-07-01, Loki used the v10 schema; after that point it switched to the more effective v11. This dramatically simplifies upgrading, ensuring it's simple to take advantage of new storage optimizations. These configs should be immutable for as long as you care about retention.
+
+## Table Manager
+
+One of the subcomponents in Loki is the `table-manager`. It is responsible for pre-creating and expiring index tables. This helps partition the writes and reads in Loki across a set of distinct indices in order to prevent unbounded growth.
+
+```yaml
+table_manager:
+  # The retention period must be a multiple of the index / chunks
+  # table "period" (see period_config).
+  retention_deletes_enabled: true
+  # This is 15 weeks retention, based on the 168h (1 week) period durations used in the rest of the examples.
+  retention_period: 2520h
+```
+
+For more information, see the table manager [doc](../operations/storage/table-manager.md).
+
+### Provisioning
+
+In the case of AWS DynamoDB, you'll likely want to tune the provisioned throughput for your tables as well. This is to prevent your tables from being rate limited on the one hand and from incurring unnecessary cost on the other. By default, Loki uses a [provisioned capacity](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html) strategy for DynamoDB tables, like so:
+
+```
+table_manager:
+  index_tables_provisioning:
+    # Read/write throughput requirements for the current table
+    # (the table which would handle writes/reads for data timestamped at the current time)
+    provisioned_write_throughput: <int> | default = 3000
+    provisioned_read_throughput: <int> | default = 300
+
+    # Read/write throughput requirements for non-current tables
+    inactive_write_throughput: <int> | default = 1
+    inactive_read_throughput: <int> | default = 300
+```
+
+Note that there are a few other DynamoDB provisioning options, including DynamoDB autoscaling and on-demand capacity. See the [docs](../configuration/_index.md#provision_config) for more information.
+
+## Upgrading Schemas
+
+When a new schema is released and you want to gain the advantages it provides, you can! Loki can transparently query and merge data from across schema boundaries, so there is no disruption of service and upgrading is easy.
+
+First, you'll want to create a new [period_config](../configuration/_index.md#period_config) entry in your [schema_config](../configuration/_index.md#schema_config). The important thing to remember here is to set this date at some point in the _future_ and then roll out the config file changes to Loki. This allows the table manager to create the required table in advance of writes and ensures that existing data isn't queried as if it adheres to the new schema.
+
+As an example, let's say it's 2020-07-14 and we want to start using the `v11` schema on the 20th:
+```yaml
+schema_config:
+  configs:
+    - from: 2019-07-14
+      store: boltdb
+      object_store: filesystem
+      schema: v10
+      index:
+        prefix: index_
+        period: 168h
+    - from: 2020-07-20
+      store: boltdb
+      object_store: filesystem
+      schema: v11
+      index:
+        prefix: index_
+        period: 168h
+```
+
+It's that easy; we just created a new entry starting on the 20th.
+
+## Retention
+
+With the exception of the `filesystem` chunk store, Loki will not delete old chunks itself. Deletion is generally handled instead by configuring TTLs (time to live) in the chunk store of your choice (bucket lifecycles in S3/GCS, and TTLs in Cassandra). Nor will Loki currently delete old data when your local disk fills up when using the `filesystem` chunk store -- deletion is determined only by the retention duration.
+
+We're interested in adding targeted deletion in future Loki releases (think tenant- or stream-level granularity) and may include other strategies as well.
+
+For more information, see the retention [docs](../operations/storage/retention.md).
+
+
+## Examples
+
+### Single machine/local development (boltdb+filesystem)
+
+```yaml
+storage_config:
+  boltdb:
+    directory: /tmp/loki/index
+  filesystem:
+    directory: /tmp/loki/chunks
+
+schema_config:
+  configs:
+    - from: 2020-07-01
+      store: boltdb
+      object_store: filesystem
+      schema: v11
+      index:
+        prefix: index_
+        period: 168h
+```
+
+### GCP deployment (GCS+BigTable)
+
+```yaml
+storage_config:
+  bigtable:
+    instance: <instance>
+    project: <project>
+  gcs:
+    bucket_name: <bucket>
+
+schema_config:
+  configs:
+    - from: 2020-07-01
+      store: bigtable
+      object_store: gcs
+      schema: v11
+      index:
+        prefix: index_
+        period: 168h
+```
+
+### AWS deployment (S3+DynamoDB)
+
+```yaml
+storage_config:
+  aws:
+    s3: s3://<access_key>:<uri-encoded-secret-access-key>@<region>
+    bucketnames: <bucket1,bucket2>
+    dynamodb:
+      dynamodb_url: dynamodb://<access_key>:<uri-encoded-secret-access-key>@<region>
+
+schema_config:
+  configs:
+    - from: 2020-07-01
+      store: aws
+      object_store: aws
+      schema: v11
+      index:
+        prefix: index_
+        period: 168h
+```
+
+If you don't wish to hard-code S3 credentials, you can also configure an EC2
+instance role by changing the `storage_config` section:
+
+```yaml
+storage_config:
+  aws:
+    s3: s3://region
+    bucketnames: <bucket1,bucket2>
+    dynamodb:
+      dynamodb_url: dynamodb://region
+```
+
+### On prem deployment (Cassandra+Cassandra)
+
+```yaml
+storage_config:
+  cassandra:
+    addresses: <comma-separated-IPs>
+    keyspace: <keyspace>
+    auth: <true|false>
+    username: <username> # only applicable when auth=true
+    password: <password> # only applicable when auth=true
+
+schema_config:
+  configs:
+    - from: 2020-07-01
+      store: cassandra
+      object_store: cassandra
+      schema: v11
+      index:
+        prefix: index_
+        period: 168h
+      chunks:
+        prefix: chunk_
+        period: 168h
+
+```
+
+### On prem deployment (Cassandra+MinIO)
+
+We configure MinIO by using the AWS config because MinIO implements the S3 API:
+
+```yaml
+storage_config:
+  aws:
+    # Note: use a fully qualified domain name, like localhost.
+    # full example: http://loki:supersecret@localhost.:9000
+    s3: http://<username>:<secret>@<fqdn>:<port>
+    s3forcepathstyle: true
+  cassandra:
+    addresses: <comma-separated-IPs>
+    keyspace: <keyspace>
+    auth: <true|false>
+    username: <username> # only applicable when auth=true
+    password: <password> # only applicable when auth=true
+
+schema_config:
+  configs:
+    - from: 2020-07-01
+      store: cassandra
+      object_store: aws
+      schema: v11
+      index:
+        prefix: index_
+        period: 168h
+```

From 9271789a11c0de154564380b268b9eeb1c2124b1 Mon Sep 17 00:00:00 2001
From: Robby Milo
Date: Mon, 20 Jul 2020 16:43:31 +0200
Subject: [PATCH 5/5] cleanup

---
 .gitignore             |   1 +
 docs/.DS_Store         | Bin 6148 -> 0 bytes
 docs/sources/.DS_Store | Bin 10244 -> 0 bytes
 docs/storage.md        | 296 -----------------------------------------
 4 files changed, 1 insertion(+), 296 deletions(-)
 delete mode 100644 docs/.DS_Store
 delete mode 100644 docs/sources/.DS_Store
 delete mode 100644 docs/storage.md

diff --git a/.gitignore b/.gitignore
index c8cb7d3ccc55..49231e91d02e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,3 +23,4 @@ dlv
 rootfs/
 dist
 coverage.txt
+.DS_Store
\ No newline at end of file
diff --git a/docs/.DS_Store b/docs/.DS_Store
deleted file mode 100644
index d7c3df38845b09a28a5ee8a4c72c1b93ae7900af..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6148
zcmeHKOG*SW5UuEd4Q^)XGFO-z#36$Fxq$Sv=wgN*9A@8V^A_H~t>OWEuc{PVd*MbA
zseDu&Sq1Hr(UfnyuadH-MFmzgZ`
z$00Ea27-Zq#z30YRbAqv{BHg7dAw^g+7%jweghQ<^wA>#1D+#iYBYPM4!`0!7%~gz
RH5?cZ0VO25VBi-Rcm?QJGR*)0

diff --git a/docs/sources/.DS_Store b/docs/sources/.DS_Store
deleted file mode 100644
index 9a621a09f14b9745b8a4c7907cc7608c4e6cf65d..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 10244
zcmeI2&ubGw6vyA#6r-)RAb8%J3Km*X3l?Op@h;>dDA@e64J6t0HzG*z;!!W+!Mi7a
z^y)z<9z;C}{so@IKft@+H#6CtX?8bj52En4?7npNz1h$E&ScUzDG{mV?dAeeKtyGn
zY-gu&NjdmFpCiW++j0?BA)Xehji}on@skEoKok%KL;+Di6!>=(z-KleyUeEFi*udJ*+JUjSgF~56SQDyjiJ?q5+5_ts
T01b-gp;zxhq6yrwn9<*>F7Vu?xaFvQlS3>elb=&&i!`RH@WE
zQ5`MiuhTDQKGr{6bMH2D?)KN*UiId{G@iFX4O*i%ZBYxawrCqIEssFmy^LpM`npVa
zF2zha{t0Cfe~(*f?is3sb`!L7&}#!(FnJqSw0i7xCIEY`zsvt=H|8G`ljKk|#>+!5
z<8&0n9dNg(2Y#RY%FTU{AMkKltG8Ya$A82tq09F2c$W6?FggaFh`RXM1`*GR`@Wis
z@6)|puH7H6hg>QBb)$qW-_zrx73Mpk`FeB@Y^!>v*7UeX)blu%n#}ihdA}{b8S>`z
z?{XK5@c2~#Abg@W^}J>J9_DTnYB!KaH+2Tk^0Oi_ZZ7K&;#|f)%gyLd;=H`fxST{j
zEsO-e2YYxur{9~rw(zRb^u~rB-$qtrBHWywe2;S)8&B-{)Ro?n$sl(0`pezUCLs`*&To#>1G+iFawz
qujJiIrg)TJ*X*+#nKim!cS-%9{FhnCx@FYqzG_nc$F;cLEBFl)oV5J_

diff --git a/docs/storage.md b/docs/storage.md
deleted file mode 100644
index 0876a9c79639..000000000000
--- a/docs/storage.md
+++ /dev/null
@@ -1,296 +0,0 @@
-# Storage
-
-Loki uses a two pronged strategy regarding storage, which is responsible for both it's limitations and it's advantages. The main idea is that logs are large and traditional indexing strategies are prohibitively expensive and complex to run at scale. This often brings along ancillary procedure costs in the form of schema design, index management/rotation, backup/restore protocols, etc. Instead, Loki stores all the its log content unindexed in object storage. It then uses the Prometheus label paradigm along with a small but specialized index store to allow lookup, matching, and filtering based on the these labels. When a set of unique key/value label pairs are combined with their logs, we call this a _log stream_, which is generally analagous to a log file on disk. It may have labels like `{app="api", env="production", filename="/var/logs/app.log"}`, which together uniqely identify it. The object storage is responsible for storing the compressed logs cheaply while the index takes care of storing these labels in a way that enables fast, effective querying.
-
-* [Chunk Clients](#Implementations---Chunks)
-  * [Cassandra](#Cassandra)
-  * [GCS](#GCS)
-  * [File System](#File-System)
-  * [S3](#S3)
-  * [Notable Mentions](#Notable-Mentions)
-* [Index Clients](#Implementations---Index)
-  * [Cassandra](#Cassandra-1)
-  * [BigTable](#BigTable)
-  * [DynamoDB](#DynamoDB)
-  * [BoltDB](#BoltDB)
-* [Period Configs](#Period-Configs)
-* [Table Manger](#Table-Manager)
-* [Upgrading Schemas](#Upgrading-Schemas)
-* [Retention](#Retention)
-* [Examples](Examples)
-  * [Single machine/local development (boltdb+filesystem)](Single-machine/local-development-(boltdb+filesystem))
-  * [GCP deployment (GCS+BigTable)](GCP-deployment-(GCS+BigTable))
-  * [AWS deployment (S3+DynamoDB)](AWS-deployment-(S3+DynamoDB))
-  * [On prem deployment (Cassandra+Cassandra)](On-prem-deployment-(Cassandra+Cassandra))
-  * [On prem deployment (Cassandra+MinIO)](On-prem-deployment-(Cassandra+MinIO))
-
-## Implementations - Chunks
-
-### Cassandra
-
-Cassandra is a popular database and one of Loki's possible chunk stores and is production safe.
-
-### GCS
-
-GCS is a hosted object store offered by Google. It is a good candidate for a managed object store, especially when you're already running on GCP, and is production safe.
-
-### File System
-
-The file system is the simplest backend for chunks, although it's also susceptible to data loss as it's unreplicated. This is common for single binary deployments though, as well as for those trying out loki or doing local development on the project. It is similar in concept to many Prometheus deployments where a single Prometheus is responsible for monitoring a fleet.
-
-### S3
-
-S3 is AWS's hosted object store. It is a good candidate for a managed object store, especially when you're already running on AWS, and is production safe.
-
-### Notable Mentions
-
-You may use any subsitutable services, such as those that implement the S3 API like [MinIO](https://min.io/).
-
-## Implementations - Index
-
-### Cassandra
-
-Cassandra can also be utilized for the index store and asides from the experimental [boltdb-shipper](./storage/boltdb-shipper.md), it's the only non-cloud offering that can be used for the index that's horizontally scalable and has configurable replication. It's a good candidate when you already run Cassandra, are running on-prem, or do not wish to use a managed cloud offering.
-
-### BigTable
-
-Bigtable is a cloud database offered by Google. It is a good candidate for a managed index store if you're already using it (due to it's heavy fixed costs) or wish to run in GCP.
-
-### DynamoDB
-
-DynamoDB is a cloud database offered by AWS. It is a good candidate for a managed index store, especially if you're already running in AWS.
-
-#### Rate Limiting
-
-DynamoDB is susceptible to rate limiting, particularly due to overconsuming what is called [provisioned capacity](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html). This can be controlled via the [provisioning](#Provisioning) configs in the table manager.
-
-### BoltDB
-
-BoltDB is an embedded database on disk. It is not replicated and thus cannot be used for high availability or clustered Loki deployments, but is commonly paired with a `filesystem` chunk store for proof of concept deployments, trying out Loki, and development. There is also an experimental mode, the [boltdb-shipper](./operations/storage/boltdb-shipper.md), which aims to support clustered deployments using `boltdb` as an index.
-
-## Period Configs
-
-Loki aims to be backwards compatible and over the course of it's development has had many internal changes that facilitate better and more efficient storage/querying. Loki allows incrementally upgrading to these new storage _schemas_ and can query across them transparently. This makes upgrading a breeze. For instance, this is what it looks like when migrating from the v10 -> v11 schemas starting 2020-07-01:
-
-```yaml
-schema_config:
-  configs:
-    - from: 2019-07-01
-      store: boltdb
-      object_store: filesystem
-      schema: v10
-      index:
-        prefix: index_
-        period: 168h
-    - from: 2020-07-01
-      store: boltdb
-      object_store: filesystem
-      schema: v11
-      index:
-        prefix: index_
-        period: 168h
-```
-
-For all data ingested before 2020-07-01, Loki used the v10 schema and then switched after that point to the more effective v11. This dramatically simplifies upgrading, ensuring it's simple to take advantages of new storage optimizations. These configs should be immutable for as long as you care about retention.
-
-## Table Manager
-
-One of the subcomponents in Loki is the `table-manager`. It is responsible for pre-creating and expiring index tables. This helps partition the writes and reads in loki across a set of distinct indices in order to prevent unbounded growth.
-
-```yaml
-table_manager:
-  # The retention period must be a multiple of the index / chunks
-  # table "period" (see period_config).
-  retention_deletes_enabled: true
-  # This is 15 weeks retention, based on the 168h (1week) period durations used in the rest of the examples.
-  retention_period: 2520h
-```
-
-For more information, see the table manager [doc](./operations/storage/table-manager.md).
-
-### Provisioning
-
-In the case of AWS DynamoDB, you'll likely want to tune the provisioned throughput for your tables as well. This is to prevent your tables being rate limited on one hand and assuming unnecessary cost on the other. By default Loki uses a [provisioned capacity](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html) strategy for DynamoDB tables like so:
-
-```
-table_manager:
-  index_tables_provisioning:
-    # Read/write throughput requirements for the current table
-    # (the table which would handle writes/reads for data timestamped at the current time)
-    provisioned_write_throughput: <int> | default = 3000
-    provisioned_read_throughput: <int> | default = 300
-
-    # Read/write throughput requirements for non-current tables
-    inactive_write_throughput: <int> | default = 1
-    inactive_read_throughput: <int> | Default = 300
-```
-
-Note, there are a few other DynamoDB provisioning options including DynamoDB autoscaling and on-demand capacity. See the [docs](./configuration/README.md#provision_config) for more information.
-
-## Upgrading Schemas
-
-When a new schema is released and you want to gain the advantages it provides, you can! Loki can transparently query & merge data from across schema boundaries so there is no disruption of service and upgrading is easy.
-
-First, you'll want to create a new [period_config](./configuration/README.md#period_config) entry in your [schema_config](./configuration/README.md#schema_config). The important thing to remember here is to set this at some point in the _future_ and then roll out the config file changes to Loki. This allows the table manager to create the required table in advance of writes and will ensure that existing data isn't queried as if it adheres to the new schema.
-
-As an example, let's say it's 2020-07-14 and we want to start using the `v11` schema on the 20th:
-```yaml
-schema_config:
-  configs:
-    - from: 2019-07-14
-      store: boltdb
-      object_store: filesystem
-      schema: v10
-      index:
-        prefix: index_
-        period: 168h
-    - from: 2020-07-20
-      store: boltdb
-      object_store: filesystem
-      schema: v11
-      index:
-        prefix: index_
-        period: 168h
-```
-
-It's that easy; we just created a new entry starting on the 20th.
-
-## Retention
-
-With the exception of the `filesystem` chunk store, Loki will not delete old chunk stores. This is generally handled instead by configuring TTLs (time to live) in the chunk store of your choice (bucket lifecycles in S3/GCS, and TTLs in Cassandra). Neither will Loki currently delete old data when your local disk fills when using the `filesystem` chunk store -- deletion is only determined by retention duration.
-
-We're interested in adding targeted deletion in future Loki releases (think tenant or stream level granularity) and may include other strategies as well.
-
-For more information, see the configuration [docs](./operations/storage/retention.md).
-
-
-## Examples
-
-### Single machine/local development (boltdb+filesystem)
-
-```yaml
-storage_config:
-  boltdb:
-    directory: /tmp/loki/index
-  filesystem:
-    directory: /tmp/loki/chunks
-
-schema_config:
-  configs:
-    - from: 2020-07-01
-      store: boltdb
-      object_store: filesystem
-      schema: v11
-      index:
-        prefix: index_
-        period: 168h
-```
-
-### GCP deployment (GCS+BigTable)
-
-```yaml
-storage_config:
-  bigtable:
-    instance: <instance>
-    project: <project>
-  gcs:
-    bucket_name: <bucket>
-
-schema_config:
-  configs:
-    - from: 2020-07-01
-      store: bigtable
-      object_store: gcs
-      schema: v11
-      index:
-        prefix: index_
-        period: 168h
-```
-
-### AWS deployment (S3+DynamoDB)
-
-```yaml
-storage_config:
-  aws:
-    s3: s3://<access_key>:<uri-encoded-secret-access-key>@<region>
-    bucketnames: <bucket1,bucket2>
-    dynamodb:
-      dynamodb_url: dynamodb://<access_key>:<uri-encoded-secret-access-key>@<region>
-
-schema_config:
-  configs:
-    - from: 2020-07-01
-      store: aws
-      object_store: aws
-      schema: v11
-      index:
-        prefix: index_
-        period: 168h
-```
-
-If you don't wish to hard-code S3 credentials, you can also configure an EC2
-instance role by changing the `storage_config` section:
-
-```yaml
-storage_config:
-  aws:
-    s3: s3://region
-    bucketnames: <bucket1,bucket2>
-    dynamodb:
-      dynamodb_url: dynamodb://region
-```
-
-### On prem deployment (Cassandra+Cassandra)
-
-```yaml
-storage_config:
-  cassandra:
-    addresses: <comma-separated-IPs>
-    keyspace: <keyspace>
-    auth: <true|false>
-    username: <username> # only applicable when auth=true
-    password: <password> # only applicable when auth=true
-
-schema_config:
-  configs:
-    - from: 2020-07-01
-      store: cassandra
-      object_store: cassandra
-      schema: v11
-      index:
-        prefix: index_
-        period: 168h
-      chunks:
-        prefix: chunk_
-        period: 168h
-
-```
-
-### On prem deployment (Cassandra+MinIO)
-
-We configure MinIO by using the AWS config because MinIO implements the S3 API:
-
-```yaml
-storage_config:
-  aws:
-    # Note: use a fully qualified domain name, like localhost.
-    # full example: http://loki:supersecret@localhost.:9000
-    s3: http://<username>:<secret>@<fqdn>:<port>
-    s3forcepathstyle: true
-  cassandra:
-    addresses: <comma-separated-IPs>
-    keyspace: <keyspace>
-    auth: <true|false>
-    username: <username> # only applicable when auth=true
-    password: <password> # only applicable when auth=true
-
-schema_config:
-  configs:
-    - from: 2020-07-01
-      store: cassandra
-      object_store: aws
-      schema: v11
-      index:
-        prefix: index_
-        period: 168h
-```