Description
What's wrong?
Hi!
We’re in the process of migrating from Promtail to Grafana Alloy, since Promtail is being deprecated.
While doing so, we've noticed that Grafana Alloy consumes significantly more CPU and memory under the same workload and configuration pattern.
With Promtail, typical CPU usage was around 0.1–0.2 cores and memory stayed below 150MiB.
With Grafana Alloy, we’re seeing 1–2 CPU cores used consistently and memory consumption between 300MiB and 1GiB — roughly a 10x increase in CPU usage and 2–6x in memory.
This happens even with WAL disabled and with very similar relabeling logic and log volume.
The issue might be related to the high number of goroutines — go_goroutines is reporting 1217, which seems excessive for such a low log volume.
Steps to reproduce
Running Grafana Alloy on a node with only ~0.005 MB/s of log volume (measured with the query `rate(loki_write_sent_bytes_total{instance="grafana-alloy-9ng97"}[5m]) / 1024 / 1024`) results in over 1 CPU core of usage and 400 MB of memory consumption.
System information
GKE v1.31.8-gke.1113000
Software version
alloy:v1.7.2
Configuration
Config:
// Loki remote-write client: every log stream forwarded to this component's
// receiver is shipped to the single endpoint below.
loki.write "loki_tsdb" {
endpoint {
url = "..."
// TLS client configuration (client certificate + CA) for the Loki endpoint.
tls_config {
ca_file = "..."
cert_file = "..."
key_file = "..."
// NOTE(review): server certificate verification is disabled even though a
// ca_file is configured — confirm this is intentional.
insecure_skip_verify = true
}
}
// Static labels added to every outgoing stream.
external_labels = {
"k8s_cluster" = "...",
}
}
// Discovers pods via the Kubernetes API.
// NOTE(review): no namespace, label, or field selector is applied here, so
// every pod in the cluster becomes a discovery target — presumably a
// contributor to the high goroutine count reported above; consider filtering
// to the local node (e.g. a field selector on spec.nodeName) — TODO confirm.
discovery.kubernetes "kubernetes_pods" {
role = "pod"
}
// Rewrites discovered pod targets into Loki stream labels and builds the
// __path__ glob that the file matcher expands into concrete log files.
// Rule order matters: later rules read labels written by earlier ones
// (__tmp_controller_name, namespace, app).
discovery.relabel "kubernetes_pods" {
targets = discovery.kubernetes.kubernetes_pods.targets
// Strip a trailing 8–10 hex-char suffix (e.g. a ReplicaSet pod-template
// hash) from the controller name; the bare name goes to __tmp_controller_name.
rule {
source_labels = ["__meta_kubernetes_pod_controller_name"]
regex = "([0-9a-z-.]+?)(-[0-9a-f]{8,10})?"
target_label = "__tmp_controller_name"
}
// "app" = first non-empty value among the joined source labels (joined with
// the default ";" separator): app label, app.kubernetes.io/name label,
// derived controller name, then pod name as a last resort.
rule {
source_labels = ["__meta_kubernetes_pod_label_app", "__meta_kubernetes_pod_label_app_kubernetes_io_name", "__tmp_controller_name", "__meta_kubernetes_pod_name"]
regex = "^;*([^;]+)(;.*)?$"
target_label = "app"
}
// "instance" = first non-empty of the two instance-style pod labels.
rule {
source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_instance", "__meta_kubernetes_pod_label_instance"]
regex = "^;*([^;]+)(;.*)?$"
target_label = "instance"
}
// "component" = first non-empty of the two component-style pod labels.
rule {
source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_component", "__meta_kubernetes_pod_label_component"]
regex = "^;*([^;]+)(;.*)?$"
target_label = "component"
}
// Copy node, namespace, pod, and container metadata straight into labels.
rule {
source_labels = ["__meta_kubernetes_pod_node_name"]
target_label = "node_name"
}
rule {
source_labels = ["__meta_kubernetes_namespace"]
target_label = "namespace"
}
// "job" = "<namespace>/<app>", built from the labels set above.
rule {
source_labels = ["namespace", "app"]
separator = "/"
target_label = "job"
}
rule {
source_labels = ["__meta_kubernetes_pod_name"]
target_label = "pod"
}
rule {
source_labels = ["__meta_kubernetes_pod_container_name"]
target_label = "container_name"
}
// Default log path: /var/log/pods/*<pod-uid>/<container>/*.log
// ($1 captures "<uid>/<container>" from the "/"-joined source labels).
rule {
source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"]
separator = "/"
target_label = "__path__"
replacement = "/var/log/pods/*$1/*.log"
}
// Static pods: when the kubernetes.io/config.hash annotation is present,
// the on-disk pod directory uses the config hash instead of the pod UID,
// so overwrite __path__ with the hash-based glob.
rule {
source_labels = ["__meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash", "__meta_kubernetes_pod_annotation_kubernetes_io_config_hash", "__meta_kubernetes_pod_container_name"]
separator = "/"
regex = "true/(.*)"
target_label = "__path__"
replacement = "/var/log/pods/*$1/*.log"
}
// Constant labels stamped onto every target (values redacted in this report).
rule {
target_label = "k8s_cluster"
replacement = "..."
}
rule {
target_label = "project"
replacement = "..."
}
}
// Expands the __path__ globs produced by the relabel rules into concrete
// per-file targets for the file tailer below.
local.file_match "kubernetes_pods" {
path_targets = discovery.relabel.kubernetes_pods.output
}
// Processing pipeline between the file tailer and the Loki writer.
loki.process "kubernetes_pods" {
forward_to = [
loki.write.loki_tsdb.receiver,
]
// Parse CRI-formatted container log lines.
stage.cri { }
// Drop the per-file "filename" label before shipping — presumably to keep
// stream cardinality down; verify nothing downstream relies on it.
stage.label_drop {
values = ["filename"]
}
}
// Tails the matched pod log files and feeds each line into the processing
// pipeline above.
loki.source.file "kubernetes_pods" {
targets = local.file_match.kubernetes_pods.targets
forward_to = [loki.process.kubernetes_pods.receiver]
// Migration aid: seed read offsets from the old Promtail positions file.
legacy_positions_file = "/run/promtail/positions.yaml"
// NOTE(review): presumably meant to avoid replaying old logs for files with
// no recorded position during the Promtail→Alloy cutover — confirm the
// interaction between tail_from_end and legacy_positions_file is as intended.
tail_from_end = true
}
Logs