mirror of https://github.com/go-redis/redis.git
chore: update otel example to the latest versions (#2606)
This commit is contained in:
parent 6f0af685cf
commit 99f8464a5a
@ -1,53 +0,0 @@
# See https://prometheus.io/docs/alerting/latest/configuration/ for details.

global:
  # The smarthost and SMTP sender used for mail notifications.
  smtp_smarthost: 'mailhog:1025'
  smtp_from: 'alertmanager@example.com'
  smtp_require_tls: false

receivers:
  - name: 'team-X'
    email_configs:
      - to: 'some-receiver@example.com'
        send_resolved: true

# The root route on which each incoming alert enters.
route:
  # The labels by which incoming alerts are grouped together. For example,
  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
  # be batched into a single group.
  group_by: ['alertname', 'cluster', 'service']

  # When a new group of alerts is created by an incoming alert, wait at
  # least 'group_wait' to send the initial notification.
  # This ensures that multiple alerts for the same group that start firing
  # shortly after one another are batched together in the first notification.
  group_wait: 30s

  # When the first notification was sent, wait 'group_interval' to send a batch
  # of new alerts that started firing for that group.
  group_interval: 5m

  # If an alert has successfully been sent, wait 'repeat_interval' to
  # resend it.
  repeat_interval: 3h

  # A default receiver
  receiver: team-X

  # All the above attributes are inherited by all child routes and can be
  # overwritten on each.

  # The child route trees.
  routes:
    # This route matches error alerts created from spans or logs.
    - matchers:
        - alert_kind="error"
      group_interval: 24h
      receiver: team-X

# The directory from which notification templates are read.
templates:
  - '/etc/alertmanager/template/*.tmpl'
@ -1,22 +1,18 @@
[sources.syslog_logs]
type = "demo_logs"
format = "syslog"
interval = 0.1

[sources.apache_common_logs]
type = "demo_logs"
format = "apache_common"
interval = 0.1

[sources.apache_error_logs]
type = "demo_logs"
format = "apache_error"
interval = 0.1

[sources.json_logs]
type = "demo_logs"
format = "json"
interval = 0.1

# Parse Syslog logs
# See the Vector Remap Language reference for more info: https://vrl.dev
@ -2,7 +2,7 @@ version: '3'

services:
  clickhouse:
    image: clickhouse/clickhouse-server:22.7
    image: clickhouse/clickhouse-server:22.10
    restart: on-failure
    environment:
      CLICKHOUSE_DB: uptrace
@ -12,17 +12,34 @@ services:
      timeout: 1s
      retries: 30
    volumes:
      - ch_data:/var/lib/clickhouse
      - ch_data1:/var/lib/clickhouse
    ports:
      - '8123:8123'
      - '9000:9000'

  postgres:
    image: postgres:15-alpine
    restart: on-failure
    environment:
      PGDATA: /var/lib/postgresql/data/pgdata
      POSTGRES_USER: uptrace
      POSTGRES_PASSWORD: uptrace
      POSTGRES_DB: uptrace
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready']
      interval: 1s
      timeout: 1s
      retries: 30
    volumes:
      - 'pg_data1:/var/lib/postgresql/data/pgdata'
    ports:
      - '5432:5432'

  uptrace:
    image: 'uptrace/uptrace:1.3.0'
    image: 'uptrace/uptrace:1.4.7'
    #image: 'uptrace/uptrace-dev:latest'
    restart: on-failure
    volumes:
      - uptrace_data:/var/lib/uptrace
      - ./uptrace.yml:/etc/uptrace/uptrace.yml
    #environment:
    #  - DEBUG=2
@ -33,7 +50,7 @@ services:
      clickhouse:
        condition: service_healthy

  otel-collector:
  otelcol:
    image: otel/opentelemetry-collector-contrib:0.58.0
    restart: on-failure
    volumes:
@ -43,22 +60,10 @@ services:
      - '4318:4318'

  vector:
    image: timberio/vector:0.24.X-alpine
    image: timberio/vector:0.28.X-alpine
    volumes:
      - ./config/vector.toml:/etc/vector/vector.toml:ro

  alertmanager:
    image: prom/alertmanager:v0.24.0
    restart: on-failure
    volumes:
      - ./config/alertmanager.yml:/etc/alertmanager/config.yml
      - alertmanager_data:/alertmanager
    ports:
      - 9093:9093
    command:
      - '--config.file=/etc/alertmanager/config.yml'
      - '--storage.path=/alertmanager'

  mailhog:
    image: mailhog/mailhog:v1.0.1
    restart: on-failure
@ -73,6 +78,5 @@ services:
    image: redis

volumes:
  uptrace_data:
  ch_data:
  alertmanager_data:
  ch_data1:
  pg_data1:
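For context, the compose stack above runs ClickHouse, PostgreSQL, Uptrace, the OpenTelemetry collector (otelcol, with OTLP exposed on port 4318), Vector, MailHog, and Redis. Below is a minimal sketch of how a Go client like the otel example might feed Redis spans and metrics into that stack; it assumes the redis/go-redis v9 redisotel and uptrace-go packages, and the DSN and addresses are placeholders, not values taken from this commit.

package main

import (
	"context"

	"github.com/redis/go-redis/extra/redisotel/v9"
	"github.com/redis/go-redis/v9"
	"github.com/uptrace/uptrace-go/uptrace"
)

func main() {
	ctx := context.Background()

	// Placeholder DSN: the real project DSN comes from the uptrace.yml projects
	// section / the Uptrace UI, not from this commit.
	uptrace.ConfigureOpentelemetry(
		uptrace.WithDSN("http://project1_secret_token@localhost:14317/1"),
		uptrace.WithServiceName("go-redis-otel-example"),
	)
	defer uptrace.Shutdown(ctx)

	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Trace and measure every Redis command issued through rdb.
	if err := redisotel.InstrumentTracing(rdb); err != nil {
		panic(err)
	}
	if err := redisotel.InstrumentMetrics(rdb); err != nil {
		panic(err)
	}

	// This SET is recorded as a span and exported to Uptrace.
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
}

With the stack running (docker-compose up -d), the SET command above should show up as a Redis span in the Uptrace UI.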
@ -29,6 +29,16 @@ ch:
  # Maximum query execution time.
  max_execution_time: 30s

##
## PostgreSQL db that is used to store metadata such as metric names, dashboards, alerts,
## and so on.
##
pg:
  addr: postgres:5432
  user: uptrace
  password: uptrace
  database: uptrace

##
## A list of pre-configured projects. Each project is fully isolated.
##
@ -95,82 +105,17 @@ metrics_from_spans:
    where: span.is_event

##
## Alerting rules for monitoring metrics.
##
## See https://uptrace.dev/get/alerting.html for details.
##
alerting:
  rules:
    - name: Network errors
      metrics:
        - system.network.errors as $net_errors
      query:
        - $net_errors > 0 group by host.name
      # for the last 5 minutes
      for: 5m
      annotations:
        summary: '{{ $labels.host_name }} has high number of net errors: {{ $values.net_errors }}'

    - name: Filesystem usage >= 90%
      metrics:
        - system.filesystem.usage as $fs_usage
      query:
        - group by host.name
        - group by device
        - where device !~ "loop"
        - $fs_usage{state="used"} / $fs_usage >= 0.9
      for: 5m
      annotations:
        summary: '{{ $labels.host_name }} has high FS usage: {{ $values.fs_usage }}'

    - name: Uptrace is dropping spans
      metrics:
        - uptrace.projects.spans as $spans
      query:
        - $spans{type=dropped} > 0
      for: 1m
      annotations:
        summary: 'Uptrace has dropped {{ $values.spans }} spans'

    - name: Always firing (for fun and testing)
      metrics:
        - process.runtime.go.goroutines as $goroutines
      query:
        - $goroutines >= 0 group by host.name
      for: 1m
      annotations:
        summary: '{{ $labels.host_name }} has high number of goroutines: {{ $values.goroutines }}'

  # Create alerts from error logs and span events.
  create_alerts_from_spans:
    enabled: true
    labels:
      alert_kind: error

##
## AlertManager client configuration.
## See https://uptrace.dev/get/alerting.html for details.
##
## Note that this is NOT an AlertManager config and you need to configure AlertManager separately.
## See https://prometheus.io/docs/alerting/latest/configuration/ for details.
##
alertmanager_client:
  # AlertManager API endpoints that Uptrace uses to manage alerts.
  urls:
    - 'http://alertmanager:9093/api/v2/alerts'

##
## To require authentication, uncomment the following section.
## To require authentication, uncomment one of the following sections.
##
auth:
  # users:
  #   - username: uptrace
  #     password: uptrace
  #   - username: admin
  #     password: admin
  users:
    - name: Anonymous
      email: uptrace@localhost
      password: uptrace
      notify_by_email: true

  # # Cloudflare user provider: uses Cloudflare Zero Trust Access (Identity)
  # # See https://developers.cloudflare.com/cloudflare-one/identity/ for more info.
  # Cloudflare Zero Trust Access (Identity)
  # See https://developers.cloudflare.com/cloudflare-one/identity/ for more info.
  # cloudflare:
  #   # The base URL of the Cloudflare Zero Trust team.
  #   - team_url: https://myteam.cloudflareaccess.com
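The 'Always firing (for fun and testing)' rule above queries process.runtime.go.goroutines, a metric produced by OpenTelemetry's Go runtime instrumentation rather than by Uptrace itself. A hedged sketch of how an application would start emitting it, assuming the go.opentelemetry.io/contrib/instrumentation/runtime package and a MeterProvider already configured (for example via uptrace-go as in the earlier sketch):

package main

import (
	"time"

	"go.opentelemetry.io/contrib/instrumentation/runtime"
)

func main() {
	// Start periodic collection of Go runtime metrics (goroutine count, GC and
	// memory stats). With a global MeterProvider configured, these are exported
	// as process.runtime.go.* metrics that a rule like the one above can query.
	if err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(10 * time.Second)); err != nil {
		panic(err)
	}

	// Keep the process alive so metrics continue to be collected and exported.
	select {}
}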
@ -178,27 +123,24 @@ auth:
  #     # You can retrieve this from the Cloudflare Zero Trust 'Access' Dashboard.
  #     audience: bea6df23b944e4a0cd178609ba1bb64dc98dfe1f66ae7b918e563f6cf28b37e0

  # # OpenID Connect (Single Sign-On)
  # oidc:
  #   # The ID is used in API endpoints, for example, in the redirect URL
  #   # `http://<uptrace-host>/api/v1/sso/<oidc-id>/callback`.
  #   - id: keycloak
  #     # Display name for the button in the login form.
  #     # Defaults to 'OpenID Connect'.
  #     display_name: Keycloak
  #     # The base URL for the OIDC provider.
  #     issuer_url: http://localhost:8080/realms/uptrace
  #     # The OAuth 2.0 Client ID
  #     client_id: uptrace
  #     # The OAuth 2.0 Client Secret
  #     client_secret: ogbhd8Q0X0e5AZFGSG3m9oirPvnetqkA
  #     # Additional OAuth 2.0 scopes to request from the OIDC provider.
  #     # Defaults to 'profile'. 'openid' is requested by default and need not be specified.
  #     scopes:
  #       - profile
  #     # The OIDC UserInfo claim to use as the user's username.
  #     # Defaults to 'preferred_username'.
  #     claim: preferred_username

  # OpenID Connect (Single Sign-On)
  oidc:
    # # The ID is used in API endpoints, for example, in the redirect URL
    # # `http://<uptrace-host>/api/v1/sso/<oidc-id>/callback`.
    # - id: keycloak
    #   # Display name for the button in the login form.
    #   # Defaults to 'OpenID Connect'.
    #   display_name: Keycloak
    #   # The base URL for the OIDC provider.
    #   issuer_url: http://localhost:8080/realms/uptrace
    #   # The OAuth 2.0 Client ID
    #   client_id: uptrace
    #   # The OAuth 2.0 Client Secret
    #   client_secret: ogbhd8Q0X0e5AZFGSG3m9oirPvnetqkA
    #   # Additional OAuth 2.0 scopes to request from the OIDC provider.
    #   # Defaults to 'profile'. 'openid' is requested by default and need not be specified.
    #   scopes:
    #     - profile

##
## Various options to tweak ClickHouse schema.
@ -248,6 +190,8 @@ listen:
site:
  # Overrides public URL for Vue-powered UI in case you put Uptrace behind a proxy.
  #addr: 'https://uptrace.mydomain.com'
  # The base path for the Vue-powered UI in case you serve Uptrace UI behind a sub path.
  path: '/'

##
## Spans processing options.
@ -277,19 +221,6 @@ metrics:
  # The number of measures to insert in a single query.
  #batch_size: 10000

##
## SQLite/PostgreSQL db that is used to store metadata such as metric names, dashboards, alerts,
## and so on.
##
db:
  # Either sqlite or postgres.
  driver: sqlite
  # Database connection string.
  #
  # Uptrace automatically creates the SQLite database file in the current working directory.
  # Make sure the directory is writable by the Uptrace process.
  dsn: 'file:uptrace.sqlite3?_pragma=foreign_keys(1)&_pragma=busy_timeout(1000)'

##
## uptrace-go client configuration.
## Uptrace sends internal telemetry here. Defaults to listen.grpc.addr.
@ -301,6 +232,18 @@ uptrace_go:
    # key_file: config/tls/uptrace.key
    # insecure_skip_verify: true

##
## SMTP settings to send emails.
## https://uptrace.dev/get/alerting.html
##
smtp_mailer:
  enabled: true
  host: mailhog
  port: 1025
  username: mailhog
  password: mailhog
  from: 'uptrace@localhost'

##
## Logging configuration.
##