in pkg/controllers/sync_context.go [492:595]
func (sc *SyncContext) sync(ctx context.Context) syncResult {
	// Multiple resources are required to start
	// and run the MySQL Cluster in K8s. Create
	// them if they do not exist yet.
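	// (The exact set is owned by ensureAllResources; presumably it
	// covers the config map patched at the end of this method and the
	// management, data and MySQL node workloads reconciled below.)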
	if sr := sc.ensureAllResources(ctx); sr.stopSync() {
		if err := sr.getError(); err != nil {
			klog.Errorf("Failed to ensure that all the required resources exist: %v", err)
		}
		return sr
	}

	// All resources and workloads exist.
	// Continue further only if all the workloads are ready.
	if sr := sc.ensureWorkloadsReadiness(); sr.stopSync() {
		// Some workloads are not ready yet => the MySQL Cluster is not fully up yet.
		// Any further config changes cannot be processed until the pods are ready.
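		// (A stopSync() result here presumably requeues the NdbCluster,
		// so the sync will be retried once the pods come up.)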
		return sr
	}

	// The workloads are ready => the MySQL Cluster is healthy.
	// Before starting to handle any new changes from the Ndb
	// custom object, verify that the MySQL Cluster is in sync
	// with the current config in the config map. This is to avoid
	// applying config changes midway through a previous config
	// change. It also means that this entire reconciliation
	// will be spent only on this verification. If the MySQL
	// Cluster has the expected config, the K8s config map will be
	// updated with the new config, specified by the Ndb object,
	// at the end of this loop. The new changes will be applied to
	// the MySQL Cluster starting from the next reconciliation loop.

	// First pass of MySQL Server reconciliation.
	// If any scale down was requested, it will be handled in this pass.
	// This is done separately to ensure that the MySQL Servers are shut
	// down before possibly reducing the number of API sections in the config.
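	// (For example, scaling the MySQL Servers from 5 replicas down to 3
	// has to terminate two mysqld processes first; only then can the two
	// [api] slots they occupied be dropped from the management config.)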
	if sr := sc.mysqldController.HandleScaleDown(ctx, sc); sr.stopSync() {
		return sr
	}

	// Reconcile the Management Server by updating the StatefulSet definition.
	// The Management StatefulSet uses the default RollingUpdate strategy and
	// the update will be rolled out by the controller once the StatefulSet
	// is patched.
	if sr := sc.reconcileManagementNodeStatefulSet(ctx); sr.stopSync() {
		if err := sr.getError(); err == nil {
			// ManagementNodeStatefulSet patched successfully
			if sc.ndb.HasSyncError() {
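				// The StatefulSet controller rolls out updates in reverse
				// ordinal order and waits for each pod to become ready, so a
				// pod left unhealthy by the earlier sync error could stall the
				// rollout. Deleting the highest-ordinal pod by hand presumably
				// forces it to be recreated with the patched spec right away.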
				mgmdReplicaCount := *(sc.mgmdNodeSfset.Spec.Replicas)
				klog.Info("Deleting the pod with the largest ordinal to avoid a delay in the restart")
				_, err := sc.deletePodOnStsUpdate(ctx, sc.mgmdNodeSfset, mgmdReplicaCount-1)
				return errorWhileProcessing(err)
			}
		}
		return sr
	}

	klog.Info("All Management node pods are up-to-date and ready")

	// Reconcile the Data Nodes by updating their StatefulSet definition.
	if sr := sc.reconcileDataNodeStatefulSet(ctx); sr.stopSync() {
		return sr
	}

	// Restart the Data Node pods, if required, to update their definitions.
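	// (Doing this from the operator, rather than leaving it to the
	// StatefulSet controller, suggests the Data Node StatefulSet uses the
	// OnDelete update strategy so the operator controls the restart order.)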
	if sr := sc.ensureDataNodePodVersion(ctx); sr.stopSync() {
		return sr
	}

	// Second pass of MySQL Server reconciliation.
	// Reconcile the rest of the spec/config changes in the MySQL Server StatefulSet.
	if sr := sc.mysqldController.ReconcileStatefulSet(ctx, sc); sr.stopSync() {
		return sr
	}

	// Handle an online add data node request.
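	// (Adding data nodes to a running NDB Cluster typically means forming
	// new node groups from the added nodes and then redistributing the
	// existing data, e.g. via ALTER TABLE ... REORGANIZE PARTITION.)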
	if sr := sc.ndbmtdController.handleAddNodeOnline(ctx, sc); sr.stopSync() {
		return sr
	}

	// Reconcile the root user.
	if sr := sc.mysqldController.reconcileRootUser(ctx, sc); sr.stopSync() {
		return sr
	}

	// At this point, the MySQL Cluster is in sync with the configuration
	// in the config map. That configuration now has to be checked to see
	// if it is still the desired config specified in the Ndb object.
	klog.Infof("Generation of the config in the config map: %d", sc.configSummary.NdbClusterGeneration)

	// Check if the config map has processed the latest NdbCluster generation.
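	// (patchConfigMap presumably compares the generation recorded in the
	// config map against the NdbCluster object's current generation and
	// rewrites the stored config only when they differ.)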
	patched, err := sc.patchConfigMap(ctx)
	if patched {
		// Only the config map was updated during this loop.
		// The next loop will actually start applying the new config.
		return finishProcessing()
	} else if err != nil {
		klog.Errorf("Failed to patch the ConfigMap: %v", err)
		return errorWhileProcessing(err)
	}

	// The MySQL Cluster is in sync with the NdbCluster spec.
	sc.syncSuccess = true
	return finishProcessing()
}