func updateTargetStatus()

in pkg/operator/target_status.go [325:368]
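
Patches scrape target status onto PodMonitoring-style resources: endpoint statuses derived from the target results are written to the matching objects, PodMonitorings that received no statuses are reset to an empty list, and patch errors are collected and joined so that one failure does not stop the remaining updates.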


func updateTargetStatus(ctx context.Context, logger logr.Logger, kubeClient client.Client, targets []*prometheusv1.TargetsResult, podMonitorings []monitoringv1.PodMonitoringCRD) error {
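	// Convert the raw target results into endpoint statuses keyed by scrape job.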
	endpointMap, err := buildEndpointStatuses(targets)
	if err != nil {
		return err
	}

	var errs []error
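	// Track which PodMonitorings received endpoint statuses; any others are reset below.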
	withStatuses := map[string]bool{}
	for job, endpointStatuses := range endpointMap {
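		// Map the scrape job key back to the object that generated it.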
		pm, err := getObjectByScrapeJobKey(job)
		if err != nil {
			errs = append(errs, fmt.Errorf("building target: %s: %w", job, err))
			continue
		}
		if pm == nil {
			// Skip hard-coded jobs which we do not patch.
			continue
		}
		withStatuses[pm.GetName()] = true
		pm.GetPodMonitoringStatus().EndpointStatuses = endpointStatuses

		if err := patchPodMonitoringStatus(ctx, kubeClient, pm, pm.GetPodMonitoringStatus()); err != nil {
			// Save and log any error encountered while patching the status.
			// Don't return early on what may be a transient error; keep
			// patching the remaining statuses before exiting.
			errs = append(errs, err)
			logger.Error(err, "patching status", "job", job, "gvk", pm.GetObjectKind().GroupVersionKind())
		}
	}

	// Any PodMonitorings that exist but received no endpoint statuses above
	// are reset to an empty status.
	for _, pm := range podMonitorings {
		if _, exists := withStatuses[pm.GetName()]; !exists {
			pm.GetPodMonitoringStatus().EndpointStatuses = []monitoringv1.ScrapeEndpointStatus{}
			if err := patchPodMonitoringStatus(ctx, kubeClient, pm, pm.GetPodMonitoringStatus()); err != nil {
				// Same reasoning as above for error handling.
				errs = append(errs, err)
				logger.Error(err, "patching empty status", "pm", pm.GetName(), "gvk", pm.GetObjectKind().GroupVersionKind())
			}
		}
	}

	return errors.Join(errs...)
}
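
For orientation, below is a minimal sketch of how a caller could drive updateTargetStatus on a timer. The pollTargetStatus name, the interval parameter, and the two fetch callbacks are hypothetical and not taken from the file above; the sketch assumes it lives in the same pkg/operator package, with the package's imports for logr, controller-runtime's client, prometheusv1, and monitoringv1 available, plus the standard library's context and time.


// pollTargetStatus is a hypothetical driver for updateTargetStatus; the name,
// the interval, and the two fetch callbacks are illustrative only.
func pollTargetStatus(
	ctx context.Context,
	logger logr.Logger,
	kubeClient client.Client,
	interval time.Duration,
	fetchTargets func(context.Context) ([]*prometheusv1.TargetsResult, error),
	listPodMonitorings func(context.Context) ([]monitoringv1.PodMonitoringCRD, error),
) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			targets, err := fetchTargets(ctx)
			if err != nil {
				logger.Error(err, "fetching targets")
				continue
			}
			podMonitorings, err := listPodMonitorings(ctx)
			if err != nil {
				logger.Error(err, "listing PodMonitorings")
				continue
			}
			if err := updateTargetStatus(ctx, logger, kubeClient, targets, podMonitorings); err != nil {
				// updateTargetStatus joins per-object patch errors, so an error
				// here does not mean every patch failed.
				logger.Error(err, "updating target status")
			}
		}
	}
}


Passing the fetch steps in as callbacks keeps the sketch self-contained and easy to unit-test; the operator's real caller may wire these dependencies differently.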