in pkg/operator/apis/monitoring/v1/pod_config.go [159:209]
func (p *PodMonitoring) endpointScrapeConfig(index int, relabelCfgs []*relabel.Config, pool PrometheusSecretConfigs) (*promconfig.ScrapeConfig, error) {
	// Filter targets that belong to selected pods.
	selectors, err := relabelingsForSelector(p.Spec.Selector, p)
	if err != nil {
		return nil, err
	}
	relabelCfgs = append(relabelCfgs, selectors...)
	metadataLabels := make(map[string]bool)
	// The metadata list must always be set in general, but we allow the null case
	// for backwards compatibility and won't add any labels in that case.
	if p.Spec.TargetLabels.Metadata != nil {
		for _, l := range *p.Spec.TargetLabels.Metadata {
			if !allowedPodMonitoringLabels[l] {
				return nil, fmt.Errorf("metadata label %q not allowed, must be one of %v", l, allowedPodMonitoringLabels)
			}
			metadataLabels[l] = true
		}
	}
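	// Translate the collected metadata label set into relabeling rules.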
	relabelCfgs = append(relabelCfgs, relabelingsForMetadata(metadataLabels)...)
	// The namespace label is always set for PodMonitorings.
	relabelCfgs = append(relabelCfgs, &relabel.Config{
		Action:       relabel.Replace,
		SourceLabels: prommodel.LabelNames{"__meta_kubernetes_namespace"},
		TargetLabel:  labelNamespace,
	})
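	// Set the static job label to the name of the PodMonitoring resource.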
	relabelCfgs = append(relabelCfgs, &relabel.Config{
		Action:      relabel.Replace,
		Replacement: p.Name,
		TargetLabel: "job",
	})
	// Drop pods in the Failed or Succeeded phase when filterRunning is unset or
	// explicitly enabled.
	if p.Spec.FilterRunning == nil || *p.Spec.FilterRunning {
		relabelCfgs = append(relabelCfgs, &relabel.Config{
			Action:       relabel.Drop,
			SourceLabels: prommodel.LabelNames{"__meta_kubernetes_pod_phase"},
			Regex:        relabel.MustNewRegexp("(Failed|Succeeded)"),
		})
	}
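	// Hand off to the shared per-endpoint builder with the endpoint selected by index.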
	return endpointScrapeConfig(
		p,
		p.Spec.Endpoints[index],
		relabelCfgs,
		p.Spec.TargetLabels.FromPod,
		p.Spec.Limits,
		pool,
	)
}
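
// A minimal usage sketch (not part of the file): a hypothetical caller that
// builds one scrape config per endpoint by invoking endpointScrapeConfig with
// each index. The function name, base relabelings, and error wrapping below
// are illustrative assumptions, not the operator's actual entry point.
func (p *PodMonitoring) buildScrapeConfigsSketch(baseRelabelCfgs []*relabel.Config, pool PrometheusSecretConfigs) ([]*promconfig.ScrapeConfig, error) {
	var configs []*promconfig.ScrapeConfig
	for i := range p.Spec.Endpoints {
		// Pass a copy of the shared relabelings so per-endpoint appends cannot
		// clobber each other through a shared backing array.
		cfgs := append([]*relabel.Config(nil), baseRelabelCfgs...)
		c, err := p.endpointScrapeConfig(i, cfgs, pool)
		if err != nil {
			return nil, fmt.Errorf("invalid definition for endpoint with index %d: %w", i, err)
		}
		configs = append(configs, c)
	}
	return configs, nil
}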