func crossReferencePickedClustersAndDeDupBindings()

in pkg/scheduler/framework/frameworkutils.go [120:224]


// crossReferencePickedClustersAndDeDupBindings cross-references the clusters picked in
// the current scheduling run against the bindings produced by previous runs, and sorts
// the bindings into three buckets:
//
//   - toCreate: new bindings for picked clusters that have no existing binding;
//   - toDelete: obsolete bindings whose target cluster is no longer picked;
//   - toPatch:  existing (obsolete or unscheduled) bindings whose target cluster is
//     picked again and whose spec must be refreshed against the current policy snapshot.
//
// It returns an error if an unscheduled binding is missing its previous-state
// annotation, or if a unique name cannot be generated for a new binding.
func crossReferencePickedClustersAndDeDupBindings(
	crpName string,
	policy *placementv1beta1.ClusterSchedulingPolicySnapshot,
	picked ScoredClusters,
	unscheduled, obsolete []*placementv1beta1.ClusterResourceBinding,
) (toCreate, toDelete []*placementv1beta1.ClusterResourceBinding, toPatch []*bindingWithPatch, err error) {
	// Pre-allocate with exact upper bounds: at most every picked cluster needs a new
	// binding, at most every obsolete binding is deleted, and at most every obsolete or
	// unscheduled binding is patched.
	toCreate = make([]*placementv1beta1.ClusterResourceBinding, 0, len(picked))
	toDelete = make([]*placementv1beta1.ClusterResourceBinding, 0, len(obsolete))
	toPatch = make([]*bindingWithPatch, 0, len(obsolete)+len(unscheduled))

	// Build a map of picked scored clusters for quick lookup, sized to the input.
	pickedMap := make(map[string]*ScoredCluster, len(picked))
	for _, scored := range picked {
		pickedMap[scored.Cluster.Name] = scored
	}

	// Tracks the target clusters that already have a binding (obsolete or unscheduled);
	// any picked cluster absent from this set needs a brand-new binding.
	checked := make(map[string]bool, len(obsolete)+len(unscheduled))

	for _, binding := range obsolete {
		scored, ok := pickedMap[binding.Spec.TargetCluster]
		checked[binding.Spec.TargetCluster] = true

		if !ok {
			// The binding's target cluster is no longer picked in the current run; add
			// the binding to the toDelete list so the caller can dispose of it.
			toDelete = append(toDelete, binding)
			continue
		}

		// The binding's target cluster is picked again in the current run; yet the binding
		// is originally created/updated in accordance with an out-of-date scheduling policy.
		// Add the binding to the toPatch list. We will simply keep the binding's state as
		// it could be "scheduled" or "bound".
		toPatch = append(toPatch, patchBindingFromScoredCluster(binding, binding.Spec.State, scored, policy))
	}

	for _, binding := range unscheduled {
		scored, ok := pickedMap[binding.Spec.TargetCluster]
		checked[binding.Spec.TargetCluster] = true
		if !ok {
			// This cluster is not picked up again; skip it.
			continue
		}
		// The binding's target cluster is picked again in the current run; yet the binding
		// is originally de-selected by the previous scheduling round.
		// Add the binding to the toPatch list so that we won't create more and more bindings.
		// We need to recover the previous state before the binding is marked as unscheduled.
		var desiredState placementv1beta1.BindingState
		// The previous state was recorded in the binding's annotations when it was
		// marked unscheduled; restore it from there.
		currentAnnotation := binding.GetAnnotations()
		if previousState, exist := currentAnnotation[placementv1beta1.PreviousBindingStateAnnotation]; exist {
			desiredState = placementv1beta1.BindingState(previousState)
			// Remove the annotation just to avoid confusion.
			delete(currentAnnotation, placementv1beta1.PreviousBindingStateAnnotation)
			binding.SetAnnotations(currentAnnotation)
		} else {
			// An unscheduled binding without the annotation is an invariant violation;
			// surface it as an unexpected-behavior error.
			return nil, nil, nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("failed to find the previous state of an unscheduled binding: %+v", binding))
		}
		toPatch = append(toPatch, patchBindingFromScoredCluster(binding, desiredState, scored, policy))
	}

	for _, scored := range picked {
		if _, ok := checked[scored.Cluster.Name]; !ok {
			// The cluster is newly picked in the current run; it does not have an associated binding in presence.
			name, err := uniquename.NewClusterResourceBindingName(crpName, scored.Cluster.Name)
			if err != nil {
				// Cannot get a unique name for the binding; normally this should never happen.
				return nil, nil, nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("failed to cross reference picked clusters and existing bindings: %w", err))
			}
			// Copy the scores into locals so the binding can take their addresses.
			affinityScore := scored.Score.AffinityScore
			topologySpreadScore := scored.Score.TopologySpreadScore
			binding := &placementv1beta1.ClusterResourceBinding{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
					Labels: map[string]string{
						placementv1beta1.CRPTrackingLabel: crpName,
					},
					Finalizers: []string{placementv1beta1.SchedulerCRBCleanupFinalizer},
				},
				Spec: placementv1beta1.ResourceBindingSpec{
					State: placementv1beta1.BindingStateScheduled,
					// Leave the associated resource snapshot name empty; it is up to another controller
					// to fulfill this field.
					SchedulingPolicySnapshotName: policy.Name,
					TargetCluster:                scored.Cluster.Name,
					ClusterDecision: placementv1beta1.ClusterDecision{
						ClusterName: scored.Cluster.Name,
						Selected:    true,
						ClusterScore: &placementv1beta1.ClusterScore{
							AffinityScore:       &affinityScore,
							TopologySpreadScore: &topologySpreadScore,
						},
						Reason: fmt.Sprintf(resourceScheduleSucceededWithScoreMessageFormat, scored.Cluster.Name, affinityScore, topologySpreadScore),
					},
				},
			}

			toCreate = append(toCreate, binding)
		}
	}

	return toCreate, toDelete, toPatch, nil
}