# recipes/host-and-kubelet-metrics/collector-config.yaml
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
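
# Deploys the OpenTelemetry Collector as a per-node DaemonSet: the hostmetrics
# receiver scrapes node-level system metrics and the kubeletstats receiver
# scrapes kubelet/container metrics, with everything exported to Google Cloud
# Managed Service for Prometheus.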
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: otel
spec:
  image: otel/opentelemetry-collector-contrib:0.112.0
  mode: daemonset
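  # Expose this pod's node name so the kubeletstats receiver below can
  # address the kubelet running on the same node.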
  env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
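  # Mount the node's root filesystem so the hostmetrics receiver can read the
  # host's /proc and /sys from inside the container (see root_path below).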
  volumes:
    - name: hostfs
      hostPath:
        path: /
  volumeMounts:
    - mountPath: /hostfs
      name: hostfs
  config: |
    receivers:
      hostmetrics:
        collection_interval: 10s
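        # Point the scrapers at the host filesystem mounted above so they read
        # the node's /proc and /sys instead of the container's.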
        root_path: /hostfs
        scrapers:
          cpu:
          disk:
          load:
          memory:
          network:
          paging:
          processes:
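          # Exclude kubelet-managed volume mounts, which would otherwise add a
          # noisy, largely duplicated filesystem series for every pod volume.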
          filesystem:
            exclude_mount_points:
              match_type: regexp
              mount_points:
                - /var/lib/kubelet/*
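      # Scrape the kubelet on this node via its read-only port (10255). If the
      # read-only port is disabled on your nodes, switch to the authenticated
      # port 10250 with auth_type: serviceAccount and an https endpoint.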
      kubeletstats:
        collection_interval: 10s
        auth_type: "none"
        endpoint: "http://${env:K8S_NODE_NAME}:10255"
        insecure_skip_verify: true
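    # The gcp detector discovers resource attributes (project ID, cluster
    # name, location) that the googlemanagedprometheus exporter uses to build
    # the monitored resource for each series.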
    processors:
      resourcedetection:
        detectors: [gcp]
        timeout: 10s
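      # Record which node each metric came from so series from different
      # nodes' collectors stay distinct.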
      resource:
        attributes:
          - action: insert
            key: k8s.node.name
            value: ${env:K8S_NODE_NAME}
      transform:
        # "location", "cluster", "namespace", "job", "instance", and "project_id" are reserved, and
        # metrics containing these labels will be rejected. Prefix them with exported_ to prevent this.
        metric_statements:
          - context: datapoint
            statements:
              - set(attributes["exported_location"], attributes["location"])
              - delete_key(attributes, "location")
              - set(attributes["exported_cluster"], attributes["cluster"])
              - delete_key(attributes, "cluster")
              - set(attributes["exported_namespace"], attributes["namespace"])
              - delete_key(attributes, "namespace")
              - set(attributes["exported_job"], attributes["job"])
              - delete_key(attributes, "job")
              - set(attributes["exported_instance"], attributes["instance"])
              - delete_key(attributes, "instance")
              - set(attributes["exported_project_id"], attributes["project_id"])
              - delete_key(attributes, "project_id")
      batch:
        # batch metrics before sending to reduce API usage
        send_batch_max_size: 200
        send_batch_size: 200
        timeout: 5s
      memory_limiter:
        # drop metrics if memory usage gets too high
        check_interval: 1s
        limit_percentage: 65
        spike_limit_percentage: 20
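    # The debug exporter logs every data point at detailed verbosity; it is
    # useful while validating the pipeline, but remove it in production to
    # avoid flooding collector logs.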
    exporters:
      debug:
        verbosity: detailed
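      # Copy k8s.* resource attributes onto each metric as labels, and skip
      # emitting the separate target_info metric.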
      googlemanagedprometheus:
        metric:
          extra_metrics_config:
            enable_target_info: false
          resource_filters:
            - regex: "k8s.*"
    service:
      pipelines:
        metrics:
          receivers: [hostmetrics, kubeletstats]
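          # memory_limiter runs first so backpressure kicks in before any
          # other work is done; batch runs last so the exporter receives
          # full batches.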
          processors: [memory_limiter, resourcedetection, transform, resource, batch]
          exporters: [googlemanagedprometheus, debug]