in otelcollector/prometheusreceiver/targetallocator/manager.go [105:166]
func (m *Manager) sync(compareHash uint64, httpClient *http.Client) (uint64, error) {
	m.settings.Logger.Debug("Syncing target allocator jobs")

	// Fetch the scrape configs currently assigned to this collector from the
	// target allocator endpoint.
	scrapeConfigsResponse, err := getScrapeConfigsResponse(httpClient, m.cfg.Endpoint)
	if err != nil {
		m.settings.Logger.Error("Failed to retrieve job list", zap.Error(err))
		return 0, err
	}

	// Hash the response and compare it against the hash from the previous sync;
	// if nothing changed, skip rebuilding and reapplying the configuration.
	hash, err := getScrapeConfigHash(scrapeConfigsResponse)
	if err != nil {
		m.settings.Logger.Error("Failed to hash job list", zap.Error(err))
		return 0, err
	}
	if hash == compareHash {
		// no update needed
		return hash, nil
	}

	// Start from a copy of the initial scrape configurations so repeated syncs
	// do not accumulate stale job entries.
	initialConfig := make([]*promconfig.ScrapeConfig, len(m.initialScrapeConfigs))
	copy(initialConfig, m.initialScrapeConfigs)
	m.promCfg.ScrapeConfigs = initialConfig

	for jobName, scrapeConfig := range scrapeConfigsResponse {
		// Build an HTTP service discovery config for this job, falling back to a
		// 30s refresh interval when none is configured.
		var httpSD promHTTP.SDConfig
		if m.cfg.HTTPSDConfig == nil {
			httpSD = promHTTP.SDConfig{
				RefreshInterval: model.Duration(30 * time.Second),
			}
		} else {
			httpSD = promHTTP.SDConfig(*m.cfg.HTTPSDConfig)
		}
		escapedJob := url.QueryEscape(jobName)
		httpSD.URL = fmt.Sprintf("%s/jobs/%s/targets?collector_id=%s", m.cfg.Endpoint, escapedJob, m.cfg.CollectorID)

		err = configureSDHTTPClientConfigFromTA(&httpSD, m.cfg)
		if err != nil {
			m.settings.Logger.Error("Failed to configure http client config", zap.Error(err))
			return 0, err
		}
		httpSD.HTTPClientConfig.FollowRedirects = false
		scrapeConfig.ServiceDiscoveryConfigs = discovery.Configs{
			&httpSD,
		}

		if m.cfg.HTTPScrapeConfig != nil {
			scrapeConfig.HTTPClientConfig = commonconfig.HTTPClientConfig(*m.cfg.HTTPScrapeConfig)
		}

		m.promCfg.ScrapeConfigs = append(m.promCfg.ScrapeConfigs, scrapeConfig)
	}

	err = m.applyCfg()
	if err != nil {
		m.settings.Logger.Error("Failed to apply new scrape configuration", zap.Error(err))
		return 0, err
	}

	return hash, nil
}
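
The compareHash argument and the returned hash let a caller poll the target allocator on a timer and reapply configuration only when the job list actually changes. Below is a minimal sketch of such a polling loop; only sync itself comes from the code above, while runSyncLoop, the interval, and the supplied http.Client are illustrative assumptions, not names from this file.

// Hypothetical driver loop (sketch, not the file's implementation).
func (m *Manager) runSyncLoop(ctx context.Context, httpClient *http.Client, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	var savedHash uint64
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// sync returns the unchanged hash on the fast path, or the hash of
			// the newly applied scrape configuration.
			hash, err := m.sync(savedHash, httpClient)
			if err != nil {
				// sync already logged the error; keep the old hash and retry
				// on the next tick.
				continue
			}
			savedHash = hash
		}
	}
}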