in pkg/operator/apis/monitoring/v1/pod_config.go [296:421]
func endpointScrapeConfig(
m PodMonitoringCRD,
ep ScrapeEndpoint,
relabelCfgs []*relabel.Config,
podLabels []LabelMapping,
limits *ScrapeLimits,
pool PrometheusSecretConfigs,
) (*promconfig.ScrapeConfig, error) {
id := m.GetKey()
// Configure how Prometheus talks to the Kubernetes API server to discover targets.
// This configuration is the same for all scrape jobs (especially the selectors),
// which ensures that Prometheus can reuse the underlying client and caches,
// reducing load on the Kubernetes API server.
discoveryCfgs := discovery.Configs{
&discoverykube.SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig,
Role: discoverykube.RolePod,
// Drop all potential targets that are not on the same node as the collector. The $(NODE_NAME)
// variable is interpolated by the config reloader sidecar before the config reaches the
// Prometheus collector. Doing this through selectors rather than relabeling should
// substantially reduce the client- and server-side load.
Selectors: []discoverykube.SelectorConfig{
{
Role: discoverykube.RolePod,
Field: fmt.Sprintf("spec.nodeName=$(%s)", EnvVarNodeName),
},
},
},
}
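// For illustration only (a sketch, assuming EnvVarNodeName resolves to NODE_NAME):
// the discovery config above roughly corresponds to the following Prometheus YAML,
// with $(NODE_NAME) substituted by the config reloader before Prometheus reads it:
//
//	kubernetes_sd_configs:
//	- role: pod
//	  selectors:
//	  - role: pod
//	    field: spec.nodeName=$(NODE_NAME)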
relabelCfgs = append(relabelCfgs,
// Use the pod name as the primary identifier in the instance label, unless the pod
// is controlled by a DaemonSet, in which case the node name is used instead.
// This provides a better user experience on dashboards that template on the instance
// label and expect it to have a meaningful value, such as common node exporter dashboards.
//
// Save the value in a temporary label and use it further down.
&relabel.Config{
Action: relabel.Replace,
SourceLabels: prommodel.LabelNames{"__meta_kubernetes_pod_name"},
TargetLabel: "__tmp_instance",
},
&relabel.Config{
Action: relabel.Replace,
SourceLabels: prommodel.LabelNames{"__meta_kubernetes_pod_controller_kind", "__meta_kubernetes_pod_node_name"},
Regex: relabel.MustNewRegexp(`DaemonSet;(.*)`),
TargetLabel: "__tmp_instance",
Replacement: "$1",
},
)
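// Worked example (hypothetical names): a Deployment pod "web-abc12" keeps
// __tmp_instance="web-abc12", while a DaemonSet pod on node "node-1" matches
// the "DaemonSet;(.*)" rule and has __tmp_instance overwritten with "node-1".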
// Filter targets by the configured port.
if ep.Port.StrVal != "" {
portValue, err := relabel.NewRegexp(ep.Port.StrVal)
if err != nil {
return nil, fmt.Errorf("invalid port name %q: %w", ep.Port, err)
}
relabelCfgs = append(relabelCfgs, &relabel.Config{
Action: relabel.Keep,
SourceLabels: prommodel.LabelNames{"__meta_kubernetes_pod_container_port_name"},
Regex: portValue,
})
// Using the pod name alone as the instance label would be ideal UX-wise, but we cannot
// be certain that multiple metrics endpoints on a pod don't expose metrics with the
// same name. Thus we have to disambiguate by port as well.
relabelCfgs = append(relabelCfgs, &relabel.Config{
Action: relabel.Replace,
SourceLabels: prommodel.LabelNames{"__tmp_instance", "__meta_kubernetes_pod_container_port_name"},
Regex: relabel.MustNewRegexp("(.+);(.+)"),
Replacement: "$1:$2",
TargetLabel: "instance",
})
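// For example (hypothetical values), a candidate with __tmp_instance="web-abc12"
// and port name "metrics" ends up with instance="web-abc12:metrics".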
} else if ep.Port.IntVal != 0 {
// Prometheus generates a target candidate for each declared port in a pod.
// If a container in a pod has no declared port, a single target candidate is generated for
// that container.
//
// If a numeric port is specified for scraping but not declared in the pod, we still
// want to allow scraping it. For that we must ensure that we produce a single final output
// target for that numeric port. The only way to achieve this is to rewrite all incoming
// target candidates for that pod into identical output targets, which Prometheus then
// deduplicates into one.
// This requires leaving the container label empty (or at a singleton value) even if it is
// requested as an output label via .targetLabels.metadata. This aligns with the Pod specification,
// which requires port names in a Pod to be unique but not port numbers. Thus, the container
// label is potentially ambiguous for numeric ports in any case.
// First, drop the container label even if it was added before.
relabelCfgs = append(relabelCfgs, &relabel.Config{
Action: relabel.LabelDrop,
Regex: relabel.MustNewRegexp(labelContainer),
})
// Then, rewrite the instance and __address__ for each candidate to the same values.
relabelCfgs = append(relabelCfgs, &relabel.Config{
Action: relabel.Replace,
SourceLabels: prommodel.LabelNames{"__tmp_instance"},
Replacement: fmt.Sprintf("$1:%d", ep.Port.IntVal),
TargetLabel: "instance",
})
relabelCfgs = append(relabelCfgs, &relabel.Config{
Action: relabel.Replace,
SourceLabels: prommodel.LabelNames{"__meta_kubernetes_pod_ip"},
Replacement: fmt.Sprintf("$1:%d", ep.Port.IntVal),
TargetLabel: "__address__",
})
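// For example (hypothetical values), with port 9090 a pod "web-abc12" with IP
// 10.4.2.7 yields instance="web-abc12:9090" and __address__="10.4.2.7:9090" for
// every candidate, so all candidates collapse into a single scrape target.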
} else {
return nil, errors.New("port must be set")
}
// Add pod labels.
pCfgs, err := labelMappingRelabelConfigs(podLabels, "__meta_kubernetes_pod_label_")
if err != nil {
return nil, fmt.Errorf("invalid pod label mapping: %w", err)
}
relabelCfgs = append(relabelCfgs, pCfgs...)
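// For illustration (hypothetical mapping): a LabelMapping from pod label "app"
// to target label "app_name" becomes a replace rule that copies
// __meta_kubernetes_pod_label_app into the output label "app_name".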
httpCfg, err := ep.ToPrometheusConfig(m, pool)
if err != nil {
return nil, fmt.Errorf("unable to parse or invalid Prometheus HTTP client config: %w", err)
}
if err := httpCfg.Validate(); err != nil {
return nil, fmt.Errorf("invalid Prometheus HTTP client config: %w", err)
}
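// The resulting job name combines the CRD key and the endpoint port, e.g. a port
// named "metrics" yields "<key>/metrics" (the exact key format depends on GetKey).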
return buildPrometheusScrapeConfig(fmt.Sprintf("%s/%s", id, &ep.Port), discoveryCfgs, httpCfg, relabelCfgs, limits, ep)
}