in pkg/controllers/nodeclass/termination/controller.go [68:98]
// finalize drives the termination flow for an AKSNodeClass that is being
// deleted: it blocks removal while any NodeClaim still references the class,
// and strips the termination finalizer once none remain so the API server can
// complete the delete.
//
// Returns a requeue-after result while NodeClaims are still draining, a
// requeue-on-conflict result when the finalizer patch races another writer,
// and an empty result on success or when the finalizer is already gone.
func (c *Controller) finalize(ctx context.Context, nodeClass *v1alpha2.AKSNodeClass) (reconcile.Result, error) {
	// Nothing to do if our finalizer has already been removed. Check this
	// before DeepCopy so we don't pay for a copy we will never use.
	if !controllerutil.ContainsFinalizer(nodeClass, v1alpha2.TerminationFinalizer) {
		return reconcile.Result{}, nil
	}
	// Snapshot the object so we can compute a minimal patch below.
	stored := nodeClass.DeepCopy()
	nodeClaimList := &karpv1.NodeClaimList{}
	if err := c.kubeClient.List(ctx, nodeClaimList, client.MatchingFields{"spec.nodeClassRef.name": nodeClass.Name}); err != nil {
		return reconcile.Result{}, fmt.Errorf("listing nodeclaims that are using nodeclass, %w", err)
	}
	if len(nodeClaimList.Items) > 0 {
		// Deletion is blocked until every referencing NodeClaim is gone;
		// surface the wait as an event naming the offending NodeClaims.
		c.recorder.Publish(WaitingOnNodeClaimTerminationEvent(nodeClass, lo.Map(nodeClaimList.Items, func(nc karpv1.NodeClaim, _ int) string { return nc.Name })))
		return reconcile.Result{RequeueAfter: time.Minute * 10}, nil // periodically fire the event
	}
	// any other processing before removing NodeClass goes here
	controllerutil.RemoveFinalizer(nodeClass, v1alpha2.TerminationFinalizer)
	if !equality.Semantic.DeepEqual(stored, nodeClass) {
		// We use client.MergeFromWithOptimisticLock because patching a list with a JSON merge patch
		// can cause races due to the fact that it fully replaces the list on a change
		// Here, we are updating the finalizer list
		// https://github.com/kubernetes/kubernetes/issues/111643#issuecomment-2016489732
		if err := c.kubeClient.Patch(ctx, nodeClass, client.MergeFromWithOptions(stored, client.MergeFromWithOptimisticLock{})); err != nil {
			if errors.IsConflict(err) {
				// Someone else modified the object concurrently; retry.
				return reconcile.Result{Requeue: true}, nil
			}
			// NotFound means the object is already gone — nothing left to do.
			return reconcile.Result{}, client.IgnoreNotFound(fmt.Errorf("removing termination finalizer, %w", err))
		}
	}
	return reconcile.Result{}, nil
}