# Grafana Tempo
Guide for implementing Grafana Tempo - a high-scale distributed tracing backend for OpenTelemetry traces. Use when configuring Tempo deployments, setting up storage backends (S3, Azure Blob, GCS), writing TraceQL queries, deploying via Helm, understanding trace structure, or troubleshooting Tempo issues on Kubernetes.
npx skill4agent add julianobarbosa/claude-code-skills tempo

## Architecture Components

| Component | Purpose |
|---|---|
| Distributor | Entry point for trace data, routes to ingesters via consistent hash ring |
| Ingester | Buffers traces in memory, creates Parquet blocks, flushes to storage |
| Query Frontend | Query orchestration, shards blockID space, coordinates queriers |
| Querier | Locates traces in ingesters or storage using bloom filters |
| Compactor | Compresses blocks, deduplicates data, manages retention |
| Metrics Generator | Optional: derives metrics from traces |
Write path:

Applications → Collector → Distributor → Ingester → Object Storage
                                ↓
                      Consistent Hash Ring
                       (routes by traceID)

Read path:

Query Request → Query Frontend → Queriers → Ingesters (recent data)
                      ↓               ↓
               Block Sharding    Object Storage (historical data)
                      ↓               ↓
         Parallel Querier Work   Bloom Filters + Indexes

Deployment modes: `-target=all` (monolithic), `-target=scalable-single-binary`, or microservices (one target per component).

# Using tempo-distributed Helm chart
distributor:
replicas: 3
ingester:
replicas: 3
querier:
replicas: 2
queryFrontend:
replicas: 2
compactor:
replicas: 1

helm repo add grafana https://grafana.github.io/helm-charts
helm repo update

helm install tempo grafana/tempo-distributed \
  --namespace monitoring \
  --values values.yaml

# Storage configuration
storage:
trace:
backend: azure # or s3, gcs
azure:
container_name: tempo-traces
storage_account_name: mystorageaccount
use_federated_token: true # Workload Identity
# Distributor
distributor:
replicas: 3
resources:
requests:
cpu: 500m
memory: 2Gi
limits:
memory: 4Gi
# Ingester
ingester:
replicas: 3
resources:
requests:
cpu: 1000m
memory: 2Gi
limits:
memory: 8Gi # Spikes to 8GB periodically
persistence:
enabled: true
size: 20Gi
# Querier
querier:
replicas: 2
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
memory: 4Gi
# Query Frontend
queryFrontend:
replicas: 2
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
memory: 2Gi
# Compactor
# Note: a single "compactor:" stanza — duplicate top-level keys are invalid
# YAML and most parsers silently keep only the last occurrence.
compactor:
  replicas: 1
  resources:
    requests:
      cpu: 500m
      memory: 2Gi
    limits:
      memory: 6Gi
  compaction:
    block_retention: 336h # 14 days
# Gateway for external access
gateway:
enabled: true
replicas: 1
# Metrics Generator (optional)
metricsGenerator:
enabled: false

## Azure Blob Storage

storage:
trace:
backend: azure
azure:
container_name: tempo-traces
storage_account_name: <storage-account-name>
# Option 1: Workload Identity (Recommended)
use_federated_token: true
# Option 2: User-Assigned Managed Identity
use_managed_identity: true
user_assigned_id: <identity-client-id>
# Option 3: Account Key (Dev only)
# storage_account_key: <account-key>
endpoint_suffix: blob.core.windows.net
hedge_requests_at: 400ms
hedge_requests_up_to: 2

## AWS S3

storage:
trace:
backend: s3
s3:
bucket: my-tempo-bucket
region: us-east-1
endpoint: s3.us-east-1.amazonaws.com
# Use IAM roles or access keys
access_key: <access-key>
secret_key: <secret-key>

## Google Cloud Storage

storage:
trace:
backend: gcs
gcs:
bucket_name: my-tempo-bucket
# Uses Workload Identity or service account

## TraceQL Basics

# Simplest query - all spans
{ }
# Filter by service
{ resource.service.name = "frontend" }
# Filter by operation
{ span:name = "GET /api/orders" }
# Filter by status
{ span:status = error }
# Filter by duration
{ span:duration > 500ms }
# Multiple conditions
{ resource.service.name = "api" && span:status = error }

# Direct parent-child relationship
{ resource.service.name = "frontend" } > { resource.service.name = "api" }
# Ancestor-descendant relationship
{ span:name = "GET /api/products" } >> { span.db.system = "postgresql" }
# Sibling relationship
{ span:name = "span-a" } ~ { span:name = "span-b" }

# Count spans
{ } | count() > 10
# Average duration
{ } | avg(span:duration) > 20ms
# Max duration
{ span:status = error } | max(span:duration)

# Rate of errors
{ span:status = error } | rate()
# Count over time
{ span:name = "GET /:endpoint" } | count_over_time()
# Percentile latency
{ span:name = "GET /:endpoint" } | quantile_over_time(span:duration, .99)
# Group by service
{ span:status = error } | rate() by(resource.service.name)
# Top 10 by error rate
{ span:status = error } | rate() by(resource.service.name) | topk(10)

| Field | Description |
|---|---|
| `span:name` | Operation name |
| `span:duration` | Elapsed time (e.g., "10ms", "1.5s") |
| `span:status` | Status: `ok`, `error`, or `unset` |
| `span:kind` | Kind: `client`, `server`, `producer`, `consumer`, `internal` |
| `trace:duration` | Total trace duration |
| `trace:rootName` | Root span name |
| `trace:rootService` | Root span service |
| Scope | Example | Description |
|---|---|---|
| `span.` | `span.http.status_code` | Span-level attributes |
| `resource.` | `resource.service.name` | Resource attributes |
| `event.` | `event.exception.message` | Event attributes |
| `link.` | `link.opentracing.ref_type` | Link attributes |
| Protocol | Port | Endpoint |
|---|---|---|
| OTLP gRPC | 4317 | - |
| OTLP HTTP | 4318 | `/v1/traces` |
| Jaeger gRPC | 14250 | - |
| Jaeger Thrift HTTP | 14268 | `/api/traces` |
| Jaeger Thrift Compact | 6831 | UDP |
| Jaeger Thrift Binary | 6832 | UDP |
| Zipkin | 9411 | `/api/v2/spans` |
# Enable multi-tenancy
multitenancy_enabled: true
# All requests must include X-Scope-OrgID header
# Example:
# curl -H "X-Scope-OrgID: tenant-1" http://tempo:3200/api/traces/<traceID>

az aks update \
--name <aks-cluster> \
--resource-group <rg> \
--enable-oidc-issuer \
--enable-workload-identity

az identity create \
--name tempo-identity \
--resource-group <rg>
IDENTITY_CLIENT_ID=$(az identity show --name tempo-identity --resource-group <rg> --query clientId -o tsv)

az role assignment create \
--role "Storage Blob Data Contributor" \
--assignee-object-id <principal-id> \
--scope /subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<storage>

az identity federated-credential create \
--name tempo-federated \
--identity-name tempo-identity \
--resource-group <rg> \
--issuer <aks-oidc-issuer-url> \
--subject system:serviceaccount:monitoring:tempo \
--audiences api://AzureADTokenExchange

serviceAccount:
annotations:
azure.workload.identity/client-id: <IDENTITY_CLIENT_ID>
podLabels:
azure.workload.identity/use: "true"
storage:
trace:
azure:
use_federated_token: true

az storage container create --name tempo-traces --account-name <storage>

# Verify RBAC assignment
az role assignment list --scope <storage-scope>
# Assign if missing
az role assignment create \
--role "Storage Blob Data Contributor" \
--assignee-object-id <principal-id> \
--scope <storage-scope>

ingester:
resources:
limits:
memory: 16Gi # Increase from 8Gi

querier:
query_timeout: 5m
max_concurrent_queries: 20

# Check pod status
kubectl get pods -n monitoring -l app.kubernetes.io/name=tempo
# Check distributor logs
kubectl logs -n monitoring -l app.kubernetes.io/component=distributor --tail=100
# Check ingester logs
kubectl logs -n monitoring -l app.kubernetes.io/component=ingester --tail=100
# Verify readiness
kubectl exec -it <tempo-pod> -n monitoring -- wget -qO- http://localhost:3200/ready
# Check ring status
kubectl port-forward svc/tempo-distributor 3200:3200 -n monitoring
curl http://localhost:3200/distributor/ring

# Get trace by ID
GET /api/traces/<traceID>
# Search traces (TraceQL)
GET /api/search?q={resource.service.name="api"}
# Search tags
GET /api/search/tags
GET /api/search/tag/<tag>/values

GET /ready
GET /metrics