func endpointScrapeConfig()

in confgenerator/config.go [318:422]


func endpointScrapeConfig(id, cfgName string, ep ScrapeEndpoint, relabelCfgs []*relabel.Config, limits *ScrapeLimits, env *CloudRunEnvironment) (*promconfig.ScrapeConfig, error) {
	if env == nil {
		return nil, fmt.Errorf("metadata from Cloud Run was not found")
	}
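	// Build a static discovery config with a single target on the configured port.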
	labelSet := make(map[prommodel.LabelName]prommodel.LabelValue)
	labelSet[prommodel.AddressLabel] = prommodel.LabelValue("0.0.0.0:" + ep.Port)
	discoveryCfgs := discovery.Configs{
		discovery.StaticConfig{
			&targetgroup.Group{Targets: []prommodel.LabelSet{labelSet}},
		},
	}
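	// Force the target labels that identify this scrape on Cloud Run: the job label
	// carries the config name, cluster is the synthetic "__run__" value, and
	// namespace is the Cloud Run service name.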
	relabelCfgs = append(relabelCfgs,
		&relabel.Config{
			Action:      relabel.Replace,
			Replacement: cfgName,
			TargetLabel: "job",
		},
		&relabel.Config{
			Action:      relabel.Replace,
			TargetLabel: "cluster",
			Replacement: "__run__",
		},
		&relabel.Config{
			Action:      relabel.Replace,
			TargetLabel: "namespace",
			Replacement: env.Service,
		},
		// The `instance` label will be <faas.id>:<port> in the final metric.
		// But since <faas.id> is unavailable until the gcp resource detector
		// runs later in the pipeline we just populate the port for now.
		//
		// See the usage of PrefixResourceAttribute for when the rest of the
		// instance label is filled in.
		&relabel.Config{
			Action:      relabel.Replace,
			TargetLabel: "instance",
			Replacement: ep.Port,
		},
	)

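	// The scrape timeout defaults to the scrape interval and must not exceed it.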
	interval, err := prommodel.ParseDuration(ep.Interval)
	if err != nil {
		return nil, fmt.Errorf("invalid scrape interval: %w", err)
	}
	timeout := interval
	if ep.Timeout != "" {
		timeout, err = prommodel.ParseDuration(ep.Timeout)
		if err != nil {
			return nil, fmt.Errorf("invalid scrape timeout: %w", err)
		}
		if timeout > interval {
			return nil, fmt.Errorf("scrape timeout %v must not be greater than scrape interval %v", timeout, interval)
		}
	}

	metricsPath := "/metrics"
	if ep.Path != "" {
		metricsPath = ep.Path
	}

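	// Convert any user-defined metric relabeling rules into Prometheus relabel configs.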
	var metricRelabelCfgs []*relabel.Config
	for _, r := range ep.MetricRelabeling {
		rcfg, err := convertRelabelingRule(r)
		if err != nil {
			return nil, err
		}
		metricRelabelCfgs = append(metricRelabelCfgs, rcfg)
	}

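	// Assemble the scrape config; the endpoint id doubles as the Prometheus job name.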
	scrapeCfg := &promconfig.ScrapeConfig{
		JobName:                 id,
		ServiceDiscoveryConfigs: discoveryCfgs,
		MetricsPath:             metricsPath,
		Scheme:                  ep.Scheme,
		Params:                  ep.Params,
		ScrapeInterval:          interval,
		ScrapeTimeout:           timeout,
		RelabelConfigs:          relabelCfgs,
		MetricRelabelConfigs:    metricRelabelCfgs,
		ScrapeProtocols:         promconfig.DefaultScrapeProtocols,
	}
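	// Carry over any user-configured scrape limits.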
	if limits != nil {
		scrapeCfg.SampleLimit = uint(limits.Samples)
		scrapeCfg.LabelLimit = uint(limits.Labels)
		scrapeCfg.LabelNameLengthLimit = uint(limits.LabelNameLength)
		scrapeCfg.LabelValueLengthLimit = uint(limits.LabelValueLength)
	}
	if err := scrapeCfg.Validate(promconfig.DefaultGlobalConfig); err != nil {
		return nil, fmt.Errorf("invalid scrape config: %w", err)
	}

	// The Prometheus configuration structs do not generally have validation methods and embed their
	// validation logic in the UnmarshalYAML methods. To keep things reasonable we don't re-validate
	// everything here and instead finish with a marshal-unmarshal cycle so that all of the
	// validation upstream provides still runs.
	b, err := yaml.Marshal(scrapeCfg)
	if err != nil {
		return nil, fmt.Errorf("scrape config cannot be marshalled: %w", err)
	}
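	// The unmarshalled copy is only used to trigger the upstream validation; the
	// original config is what gets returned.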
	var scrapeCfgCopy promconfig.ScrapeConfig
	if err := yaml.Unmarshal(b, &scrapeCfgCopy); err != nil {
		return nil, fmt.Errorf("invalid scrape configuration: %w", err)
	}
	return scrapeCfg, nil
}
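
A minimal sketch of a call site, assuming the same package context as the excerpt above. The id, config name, and endpoint values are illustrative only; real callers also pass through any user-configured relabeling rules and scrape limits instead of nil.

func exampleScrapeConfig(env *CloudRunEnvironment) (*promconfig.ScrapeConfig, error) {
	// Hypothetical endpoint: scrape /metrics on port 8080 every 30s.
	ep := ScrapeEndpoint{
		Port:     "8080",
		Path:     "/metrics",
		Interval: "30s",
	}
	// No extra target relabeling and no scrape limits in this sketch.
	return endpointScrapeConfig("run/my-service/8080", "my-service", ep, nil, nil, env)
}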