in controllers/controller.go [118:216]
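// Reconcile fetches the EtcdadmCluster and its owning CAPI Cluster, ensures the
// finalizer is in place, and then runs either the deletion path or the regular
// reconciliation path, patching the object and its status on the way out.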
func (r *EtcdadmClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) {
	log := r.Log.WithValues("etcdadmcluster", req.NamespacedName)

	// Look up the etcdadm cluster object.
	etcdCluster := &etcdv1.EtcdadmCluster{}
	if err := r.Client.Get(ctx, req.NamespacedName, etcdCluster); err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		log.Error(err, "Failed to get etcdadm cluster")
		return ctrl.Result{}, err
	}

	// Fetch the owning CAPI Cluster.
	cluster, err := util.GetOwnerCluster(ctx, r.Client, etcdCluster.ObjectMeta)
	if err != nil {
		log.Error(err, "Failed to retrieve owner Cluster from the API Server")
		return ctrl.Result{}, err
	}
	if cluster == nil {
		log.Info("Cluster Controller has not yet set OwnerRef on etcd")
		return ctrl.Result{}, nil
	}
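
	// Proceed only once the CAPI cluster's infrastructure is provisioned;
	// otherwise requeue shortly.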
	if !cluster.Status.InfrastructureReady {
		log.Info("Infrastructure cluster is not yet ready")
		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
	}

	if annotations.IsPaused(cluster, etcdCluster) {
		log.Info("Reconciliation is paused for this object")
		return ctrl.Result{}, nil
	}

	// Initialize the patch helper.
	patchHelper, err := patch.NewHelper(etcdCluster, r.Client)
	if err != nil {
		log.Error(err, "Failed to configure the patch helper")
		return ctrl.Result{Requeue: true}, nil
	}

	// Add the finalizer first, if it does not exist, to avoid a race between init and delete.
	if !controllerutil.ContainsFinalizer(etcdCluster, etcdv1.EtcdadmClusterFinalizer) {
		controllerutil.AddFinalizer(etcdCluster, etcdv1.EtcdadmClusterFinalizer)

		// Patch and return right away instead of reusing the main defer,
		// because the main defer may take too much time to get cluster status.
		patchOpts := []patch.Option{patch.WithStatusObservedGeneration{}}
		if err := patchHelper.Patch(ctx, etcdCluster, patchOpts...); err != nil {
			log.Error(err, "Failed to patch EtcdadmCluster to add finalizer")
			return ctrl.Result{}, err
		}
		return ctrl.Result{}, nil
	}
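
	// Deferred so it runs on every return path below: refresh the owned etcd
	// machines, update labels and status, then patch the object.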
	defer func() {
		etcdMachines, err := r.checkOwnedMachines(ctx, log, etcdCluster, cluster)
		if err != nil {
			reterr = kerrors.NewAggregate([]error{reterr, err})
			return
		}
		if err := r.updateMachinesEtcdReadyLabel(ctx, log, etcdMachines); err != nil {
			log.Error(err, "Failed to update etcd ready labels in machines")
			reterr = kerrors.NewAggregate([]error{reterr, err})
		}
		// Always attempt to update status.
		if err := r.updateStatus(ctx, etcdCluster, cluster, etcdMachines); err != nil {
			log.Error(err, "Failed to update EtcdadmCluster Status")
			reterr = kerrors.NewAggregate([]error{reterr, err})
		}

		if conditions.IsFalse(etcdCluster, etcdv1.EtcdMachinesSpecUpToDateCondition) &&
			conditions.GetReason(etcdCluster, etcdv1.EtcdMachinesSpecUpToDateCondition) == etcdv1.EtcdRollingUpdateInProgressReason {
			// Set Ready to false so the CAPI cluster controller pauses KCP rather than
			// repeatedly checking whether the etcd endpoints have been updated.
			etcdCluster.Status.Ready = false
		}

		// Always attempt to patch the EtcdadmCluster object and status after each reconciliation.
		if err := patchEtcdCluster(ctx, patchHelper, etcdCluster); err != nil {
			log.Error(err, "Failed to patch EtcdadmCluster")
			reterr = kerrors.NewAggregate([]error{reterr, err})
		}
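
		// If nothing failed and no requeue is already scheduled, keep requeuing
		// until the etcd cluster reports Ready (skipped during deletion).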
		if reterr == nil && !res.Requeue && res.RequeueAfter <= 0 && etcdCluster.ObjectMeta.DeletionTimestamp.IsZero() {
			if !etcdCluster.Status.Ready {
				res = ctrl.Result{RequeueAfter: 20 * time.Second}
			}
		}
	}()

	if !etcdCluster.ObjectMeta.DeletionTimestamp.IsZero() {
		// Handle the deletion reconciliation loop.
		return r.reconcileDelete(ctx, etcdCluster, cluster)
	}
	return r.reconcile(ctx, etcdCluster, cluster)
}
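
For context, here is a minimal sketch of how a reconciler like this is typically registered with a controller-runtime manager. This is an assumed illustration of the standard SetupWithManager pattern, not the repository's actual wiring, which may add watches, owner mappings, and concurrency options:

func (r *EtcdadmClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Route every EtcdadmCluster event to the Reconcile method above.
	return ctrl.NewControllerManagedBy(mgr).
		For(&etcdv1.EtcdadmCluster{}).
		Complete(r)
}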