
09. Setup Observe Services


We are going to set up the observe services.
We will use three tools for this:

  1. Tempo is the tracing backend that stores tracing data.
  2. Prometheus is the metrics backend that stores metrics data.
  3. Grafana is the dashboard that pulls data from Tempo and Prometheus and presents it in an easy-to-read way.
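
All of the files in this step live under third-party/observe/: tempo.yaml, prometheus.yaml, grafana-datasources.yaml, and compose.yaml, plus a tempo-data/ directory that Tempo uses for local storage; the whole stack runs with Docker Compose. Application traces go to Tempo (over OTLP, Jaeger, or Zipkin), Tempo's metrics generator remote-writes metrics into Prometheus, and Grafana reads from both.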

Tempo config

third-party/observe/tempo.yaml
server:
  http_listen_port: 3200

query_frontend:
  search:
    duration_slo: 5s
    throughput_bytes_slo: 1.073741824e+09
  trace_by_id:
    duration_slo: 100ms

distributor:
  receivers:        # this configuration will listen on all ports and protocols that tempo is capable of.
    jaeger:         # the receivers all come from the OpenTelemetry collector. more configuration information can
      protocols:    # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
        thrift_http:
          endpoint: "tempo:14268"
        grpc:       # for a production deployment you should only enable the receivers you need!
          endpoint: "tempo:14250"
        thrift_binary:
          endpoint: "tempo:6832"
        thrift_compact:
          endpoint: "tempo:6831"
    zipkin:
      endpoint: "tempo:9411"
    otlp:
      protocols:
        grpc:
          endpoint: "tempo:4317"
        http:
          endpoint: "tempo:4318"
    opencensus:
      endpoint: "tempo:55678"

ingester:
  max_block_duration: 5m    # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally

compactor:
  compaction:
    block_retention: 1h     # overall Tempo trace retention. set for demo purposes

metrics_generator:
  registry:
    external_labels:
      source: tempo
      cluster: docker-compose
  storage:
    path: /var/tempo/generator/wal
    remote_write:
      - url: http://prometheus:9090/api/v1/write
        send_exemplars: true
  traces_storage:
    path: /var/tempo/generator/traces
  processor:
    local_blocks:
      filter_server_spans: false
      flush_to_storage: true

storage:
  trace:
    backend: local               # backend configuration to use
    wal:
      path: /var/tempo/wal       # where to store the wal locally
    local:
      path: /var/tempo/blocks

overrides:
  defaults:
    metrics_generator:
      processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator
      generate_native_histograms: both
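
The distributor above listens on every protocol Tempo can receive, which is convenient for a demo; as the inline comment notes, a production deployment should only enable the receivers it actually needs. A minimal sketch of the same section, assuming your services export traces over OTLP only:

distributor:
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: "tempo:4317"   # OTLP gRPC, same port the compose file publishes
        http:
          endpoint: "tempo:4318"   # OTLP HTTP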

Prometheus config

third-party/observe/prometheus.yaml
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets:
          - localhost:9090
  - job_name: tempo
    static_configs:
      - targets:
          - tempo:3200
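
This Prometheus only scrapes itself and Tempo. When your own services expose a /metrics endpoint, you can add them as another scrape job; a sketch, using a hypothetical order-service container listening on port 8080 on the same Docker network:

scrape_configs:
  # ...existing prometheus and tempo jobs...
  - job_name: order-service        # hypothetical service name
    static_configs:
      - targets:
          - order-service:8080     # assumes the service serves /metrics on this port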

Grafana config

third-party/observe/grafana-datasources.yaml
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    uid: prometheus
    access: proxy
    orgId: 1
    url: http://prometheus:9090
    basicAuth: false
    isDefault: false
    version: 1
    editable: false
    jsonData:
      httpMethod: GET
  - name: Tempo
    type: tempo
    access: proxy
    orgId: 1
    url: http://tempo:3200
    basicAuth: false
    isDefault: true
    version: 1
    editable: false
    apiVersion: 1
    uid: tempo
    jsonData:
      httpMethod: GET
      serviceMap:
        datasourceUid: prometheus
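
Because Tempo's metrics generator remote-writes with send_exemplars: true and Prometheus runs with exemplar storage enabled (see the compose file below), you can optionally let Grafana jump from an exemplar on a Prometheus panel straight to the matching trace in Tempo. A sketch of the extra jsonData on the Prometheus datasource, assuming the exemplar label is named traceID (the label name depends on how your instrumentation attaches exemplars):

  - name: Prometheus
    type: prometheus
    uid: prometheus
    access: proxy
    orgId: 1
    url: http://prometheus:9090
    jsonData:
      httpMethod: GET
      exemplarTraceIdDestinations:
        - datasourceUid: tempo   # the Tempo datasource defined above
          name: traceID          # assumed exemplar label name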

Docker Compose file

third-party/observe/compose.yaml
services:
  tempo-init:
    image: grafana/tempo:latest
    user: root
    entrypoint:
      - "chown"
      - "10001:10001"
      - "/var/tempo"
    volumes:
      - ./tempo-data:/var/tempo

  tempo:
    image: grafana/tempo:latest
    command: ["-config.file=/etc/tempo.yaml"]
    volumes:
      - ./tempo.yaml:/etc/tempo.yaml
      - ./tempo-data:/var/tempo
    ports:
      - "14268:14268"   # jaeger ingest
      - "3200:3200"     # tempo
      - "9095:9095"     # tempo grpc
      - "4317:4317"     # otlp grpc
      - "4318:4318"     # otlp http
      - "9411:9411"     # zipkin
    depends_on:
      - tempo-init

  prometheus:
    image: prom/prometheus:latest
    command:
      - --config.file=/etc/prometheus.yaml
      - --web.enable-remote-write-receiver
      - --enable-feature=exemplar-storage
      - --enable-feature=native-histograms
    volumes:
      - ./prometheus.yaml:/etc/prometheus.yaml
    ports:
      - "9090:9090"

  grafana:
    image: grafana/grafana:10.1.1
    volumes:
      - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
    environment:
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
      - GF_AUTH_DISABLE_LOGIN_FORM=true
      - GF_FEATURE_TOGGLES_ENABLE=traceqlEditor metricsSummary
    ports:
      - "3000:3000"