in pkg/operator/apis/monitoring/v1/pod_config.go [246:294]
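// endpointScrapeConfig generates the Prometheus scrape config for the endpoint
// at the given index. It extends the provided relabel configs with pod selection,
// metadata, job, and pod-phase filtering rules before delegating to the shared
// package-level endpointScrapeConfig helper.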
func (c *ClusterPodMonitoring) endpointScrapeConfig(index int, relabelCfgs []*relabel.Config, pool PrometheusSecretConfigs) (*promconfig.ScrapeConfig, error) {
	// Filter targets that belong to selected pods.
	selectors, err := relabelingsForSelector(c.Spec.Selector, c)
	if err != nil {
		return nil, err
	}
	relabelCfgs = append(relabelCfgs, selectors...)
	metadataLabels := make(map[string]bool)
	// The metadata list should always be set in general, but we allow the nil case
	// for backwards compatibility. In that case we must always add the namespace label.
	if c.Spec.TargetLabels.Metadata == nil {
		metadataLabels = map[string]bool{
			labelNamespace: true,
		}
	} else {
		for _, l := range *c.Spec.TargetLabels.Metadata {
			if !allowedClusterPodMonitoringLabels[l] {
				return nil, fmt.Errorf("metadata label %q not allowed, must be one of %v", l, allowedClusterPodMonitoringLabels)
			}
			metadataLabels[l] = true
		}
	}
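	// Attach the selected metadata fields as target labels.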
	relabelCfgs = append(relabelCfgs, relabelingsForMetadata(metadataLabels)...)
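	// Set the job label to the name of the ClusterPodMonitoring resource.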
	relabelCfgs = append(relabelCfgs, &relabel.Config{
		Action:      relabel.Replace,
		Replacement: c.Name,
		TargetLabel: "job",
	})
	// Drop targets for pods in a terminal phase (Failed or Succeeded) if filtering
	// is left unspecified or explicitly enabled.
	if c.Spec.FilterRunning == nil || *c.Spec.FilterRunning {
		relabelCfgs = append(relabelCfgs, &relabel.Config{
			Action:       relabel.Drop,
			SourceLabels: prommodel.LabelNames{"__meta_kubernetes_pod_phase"},
			Regex:        relabel.MustNewRegexp("(Failed|Succeeded)"),
		})
	}
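	// Build the final scrape config from the endpoint-specific settings.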
	return endpointScrapeConfig(
		c,
		c.Spec.Endpoints[index],
		relabelCfgs,
		c.Spec.TargetLabels.FromPod,
		c.Spec.Limits,
		pool,
	)
}
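
// The following is a minimal, hypothetical sketch (not the operator's actual
// caller) of how this method might be driven across all endpoints of a
// ClusterPodMonitoring resource. The buildClusterPodScrapeConfigs name and the
// empty initial relabel chain are assumptions for illustration; in the operator
// the initial relabel configs and secret pool come from the surrounding
// reconciliation code.
func buildClusterPodScrapeConfigs(c *ClusterPodMonitoring, pool PrometheusSecretConfigs) ([]*promconfig.ScrapeConfig, error) {
	var cfgs []*promconfig.ScrapeConfig
	for i := range c.Spec.Endpoints {
		// Each endpoint starts from an empty relabel chain here; a real caller
		// would typically seed it with cluster-wide relabeling rules.
		cfg, err := c.endpointScrapeConfig(i, nil, pool)
		if err != nil {
			return nil, fmt.Errorf("endpoint %d: %w", i, err)
		}
		cfgs = append(cfgs, cfg)
	}
	return cfgs, nil
}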