in pkg/operator/operator_config.go [554:731]
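// makeAlertmanagerConfigs translates the operator's AlertingSpec into Prometheus
// Alertmanager client configs, returning them together with any referenced secret
// contents keyed by the relative path under which they are expected to be mounted.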
func (r *operatorConfigReconciler) makeAlertmanagerConfigs(ctx context.Context, spec *monitoringv1.AlertingSpec) (promconfig.AlertmanagerConfigs, map[string][]byte, error) {
var (
err error
configs promconfig.AlertmanagerConfigs
secretData = make(map[string][]byte)
)
amNamespacedName := types.NamespacedName{
Namespace: r.opts.OperatorNamespace,
Name: NameAlertmanager,
}
// If the default Alertmanager service exists, add a config for it in addition to those derived from spec.Alertmanagers.
var amSvc corev1.Service
if resourceErr := r.client.Get(ctx, amNamespacedName, &amSvc); resourceErr == nil {
// The Alertmanager service must expose at least one port; otherwise it is ignored.
if ports := amSvc.Spec.Ports; len(ports) > 0 {
// Assume first port on service is the correct endpoint.
port := ports[0].Port
svcDNSName := fmt.Sprintf("%s.%s:%d", amSvc.Name, amSvc.Namespace, port)
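// Start from the default config and point it at the service via a static target.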
cfg := promconfig.DefaultAlertmanagerConfig
cfg.ServiceDiscoveryConfigs = discovery.Configs{
discovery.StaticConfig{
&targetgroup.Group{
Targets: []prommodel.LabelSet{{prommodel.AddressLabel: prommodel.LabelValue(svcDNSName)}},
},
},
}
configs = append(configs, &cfg)
}
}
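// Build a client config for each Alertmanager endpoint specified in the spec.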
for _, am := range spec.Alertmanagers {
// The upstream struct lacks the omitempty tag on the API version field, so an unset
// value would be marshalled as an explicit empty (invalid) string. We therefore start
// from the defaulted config. The same applies to the embedded HTTPConfig.
cfg := promconfig.DefaultAlertmanagerConfig
if am.PathPrefix != "" {
cfg.PathPrefix = am.PathPrefix
}
if am.Scheme != "" {
cfg.Scheme = am.Scheme
}
if am.APIVersion != "" {
cfg.APIVersion = promconfig.AlertmanagerAPIVersion(am.APIVersion)
}
// Timeout, APIVersion, PathPrefix, and Scheme all fall back to their defaults if left unspecified.
if am.Timeout != "" {
cfg.Timeout, err = prommodel.ParseDuration(am.Timeout)
if err != nil {
return nil, nil, fmt.Errorf("invalid timeout: %w", err)
}
}
// Authorization.
if am.Authorization != nil {
cfg.HTTPClientConfig.Authorization = &promcommonconfig.Authorization{
Type: am.Authorization.Type,
}
if c := am.Authorization.Credentials; c != nil {
b, err := getSecretKeyBytes(ctx, r.client, r.opts.PublicNamespace, c)
if err != nil {
return nil, nil, err
}
p := pathForSelector(r.opts.PublicNamespace, &monitoringv1.SecretOrConfigMap{Secret: c})
secretData[p] = b
cfg.HTTPClientConfig.Authorization.CredentialsFile = path.Join(secretsDir, p)
}
}
// TLS config.
if am.TLS != nil {
minVersion, err := monitoringv1.TLSVersionFromString(am.TLS.MinVersion)
if err != nil {
return nil, nil, fmt.Errorf("unable to convert TLS min version: %w", err)
}
maxVersion, err := monitoringv1.TLSVersionFromString(am.TLS.MaxVersion)
if err != nil {
return nil, nil, fmt.Errorf("unable to convert TLS min version: %w", err)
}
tlsCfg := promcommonconfig.TLSConfig{
InsecureSkipVerify: am.TLS.InsecureSkipVerify,
ServerName: am.TLS.ServerName,
MinVersion: minVersion,
MaxVersion: maxVersion,
}
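// Collect any referenced CA, client certificate, and key data so it can be provisioned
// under the secrets directory, and reference the resulting file paths in the TLS config.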
if am.TLS.CA != nil {
p := pathForSelector(r.opts.PublicNamespace, am.TLS.CA)
b, err := getSecretOrConfigMapBytes(ctx, r.client, r.opts.PublicNamespace, am.TLS.CA)
if err != nil {
return nil, nil, err
}
secretData[p] = b
tlsCfg.CAFile = path.Join(secretsDir, p)
}
if am.TLS.Cert != nil {
p := pathForSelector(r.opts.PublicNamespace, am.TLS.Cert)
b, err := getSecretOrConfigMapBytes(ctx, r.client, r.opts.PublicNamespace, am.TLS.Cert)
if err != nil {
return nil, nil, err
}
secretData[p] = b
tlsCfg.CertFile = path.Join(secretsDir, p)
}
if am.TLS.KeySecret != nil {
p := pathForSelector(r.opts.PublicNamespace, &monitoringv1.SecretOrConfigMap{Secret: am.TLS.KeySecret})
b, err := getSecretKeyBytes(ctx, r.client, r.opts.PublicNamespace, am.TLS.KeySecret)
if err != nil {
return nil, nil, err
}
secretData[p] = b
tlsCfg.KeyFile = path.Join(secretsDir, p)
}
cfg.HTTPClientConfig.TLSConfig = tlsCfg
}
// Configure discovery of AM endpoints via Kubernetes API.
cfg.ServiceDiscoveryConfigs = discovery.Configs{
&discoverykube.SDConfig{
// Instantiate a default client config explicitly: the follow_redirects field lacks the
// omitempty tag, so an empty config would be marshalled as explicitly setting it to false.
HTTPClientConfig: promcommonconfig.DefaultHTTPClientConfig,
Role: discoverykube.RoleEndpoint,
NamespaceDiscovery: discoverykube.NamespaceDiscovery{
Names: []string{am.Namespace},
},
},
}
svcNameRE, err := relabel.NewRegexp(am.Name)
if err != nil {
return nil, nil, fmt.Errorf("cannot build regex from service name %q: %w", am.Name, err)
}
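// Keep only endpoints belonging to the named Alertmanager service.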
cfg.RelabelConfigs = append(cfg.RelabelConfigs, &relabel.Config{
Action: relabel.Keep,
SourceLabels: prommodel.LabelNames{"__meta_kubernetes_endpoints_name"},
Regex: svcNameRE,
})
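// Narrow targets to the configured port: a named port is matched against the endpoint
// port name, while a numeric port is substituted directly into the target address.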
if am.Port.StrVal != "" {
re, err := relabel.NewRegexp(am.Port.String())
if err != nil {
return nil, nil, fmt.Errorf("cannot build regex from port %q: %w", am.Port, err)
}
cfg.RelabelConfigs = append(cfg.RelabelConfigs, &relabel.Config{
Action: relabel.Keep,
SourceLabels: prommodel.LabelNames{"__meta_kubernetes_endpoint_port_name"},
Regex: re,
})
} else if am.Port.IntVal != 0 {
// The endpoints object does not provide a meta label for the port number. If the endpoint
// is backed by a pod we could inspect the pod's port number label, but to make it work in
// general we simply override the port in the address label.
// If the endpoints object has multiple ports, this creates duplicate targets, but they are
// deduplicated by the discovery engine.
re, err := relabel.NewRegexp(`(.+):\d+`)
if err != nil {
return nil, nil, fmt.Errorf("building address regex failed: %w", err)
}
cfg.RelabelConfigs = append(cfg.RelabelConfigs, &relabel.Config{
Action: relabel.Replace,
SourceLabels: prommodel.LabelNames{"__address__"},
Regex: re,
TargetLabel: "__address__",
Replacement: fmt.Sprintf("$1:%d", am.Port.IntVal),
})
}
// TODO(pintohutch): add support for basic_auth, oauth2, proxy_url, follow_redirects.
// Append to alertmanagers config array.
configs = append(configs, &cfg)
}
return configs, secretData, nil
}