func()

in pkg/controllers/sync_context.go [279:420]


// ensureAllResources ensures that all the K8s resources required by the
// NdbCluster exist, creating any that are missing. It returns a syncResult
// that stops the current reconciliation on error, or when a newly created
// workload still needs to become ready, and allows processing to continue
// once all the resources exist.
func (sc *SyncContext) ensureAllResources(ctx context.Context) syncResult {
	var err error
	var resourceExists bool

	// create pod disruption budgets
	if resourceExists, err = sc.ensurePodDisruptionBudget(ctx); err != nil {
		return errorWhileProcessing(err)
	}
	if !resourceExists {
		klog.Info("Created resource : Pod Disruption Budgets")
	}

	// ensure config map
	var cm *corev1.ConfigMap
	if cm, resourceExists, err = sc.configMapController.EnsureConfigMap(ctx, sc); err != nil {
		return errorWhileProcessing(err)
	}
	if !resourceExists {
		klog.Info("Created resource : Config Map")
	}

	// Create a new ConfigSummary
	if sc.configSummary, err = ndbconfig.NewConfigSummary(cm.Data); err != nil {
		// This is unlikely to happen, as the only possible error is a config
		// parse error, and the configString was generated by the operator itself
		return errorWhileProcessing(err)
	}

	// Ensure that an operator password secret exists before creating the StatefulSets
	secretClient := NewMySQLUserPasswordSecretInterface(sc.kubernetesClient)
	if _, err := secretClient.EnsureNDBOperatorPassword(ctx, sc.ndb); err != nil {
		klog.Errorf("Failed to ensure ndb-operator password secret: %s", err)
		return errorWhileProcessing(err)
	}

	initialSystemRestart := sc.ndb.Status.ProcessedGeneration == 0
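	// (A ProcessedGeneration of 0 means the operator has never completed
	// a sync of this NdbCluster, i.e. the MySQL Cluster nodes are
	// starting for the first time.)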

	nc := sc.ndb
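	// If the previous sync attempt ended with an error, the user may
	// have pushed an updated spec that fixes it. Patching the ConfigMap
	// here picks that up; a successful patch ends this sync, and the
	// next reconciliation continues with the updated config.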
	if nc.HasSyncError() {
		// Patch config map if new spec is available
		patched, err := sc.patchConfigMap(ctx)
		if patched {
			return finishProcessing()
		} else if err != nil {
			return errorWhileProcessing(err)
		}
	}

	// create the management stateful set if it doesn't exist
	if sc.mgmdNodeSfset, resourceExists, err = sc.ensureManagementServerStatefulSet(ctx); err != nil {
		return errorWhileProcessing(err)
	}
	if !resourceExists {
		// Management statefulset was just created.
		klog.Info("Created resource : StatefulSet for Management Nodes")
		// Wait for it to become ready before starting the data nodes.
		// Reconciliation will continue once all the pods in the statefulset are ready.
		klog.Infof("Reconciliation will continue after all the management nodes are ready.")
		return finishProcessing()
	}

	if initialSystemRestart && !statefulsetUpdateComplete(sc.mgmdNodeSfset) {
		if !workloadHasConfigGeneration(sc.mgmdNodeSfset, sc.configSummary.NdbClusterGeneration) {
			// Note: This case arises during an initial system restart when there was
			// an error applying the NDB spec and the user has since updated the spec
			// to rectify it. Since the ConfigMap is already updated, patching the
			// StatefulSet alone should be enough. But the mgmd StatefulSet has its
			// pod management policy set to OrderedReady, and due to an issue in
			// Kubernetes, patching a StatefulSet with OrderedReady does not restart
			// a pod until its backoff timer expires. If the backoff time is high,
			// the pods will have to wait longer to be restarted. So, manually delete
			// the pod after patching the StatefulSet.

			// The user updated the NdbCluster spec and the StatefulSet needs to be patched.
			klog.Infof("A new generation of NdbCluster spec exists and the statefulset %q needs to be updated", getNamespacedName(sc.mgmdNodeSfset))
			if sr := sc.reconcileManagementNodeStatefulSet(ctx); sr.stopSync() {
				if err := sr.getError(); err == nil {
					// ManagementNodeStatefulSet patched successfully
					klog.Infof("Delete smallest ordinal pod manually to avoid delay in restart")
					_, err := sc.deletePodOnStsUpdate(ctx, sc.mgmdNodeSfset, 0)
					return errorWhileProcessing(err)
				}
				return sr
			}
		}

		// Management nodes are starting for the first time, and
		// one of them is not ready yet, which implies that this
		// reconciliation was triggered only to update the
		// NdbCluster status. No need to start data nodes and
		// MySQL Servers yet.
		return finishProcessing()
	}

	// create the data node stateful set if it doesn't exist
	if sc.dataNodeSfSet, resourceExists, err = sc.ensureDataNodeStatefulSet(ctx); err != nil {
		return errorWhileProcessing(err)
	}
	if !resourceExists {
		// Data nodes statefulset was just created.
		klog.Info("Created resource : StatefulSet for Data Nodes")
		// Wait for it to become ready.
		// Reconciliation will continue once all the pods in the statefulset are ready.
		klog.Infof("Reconciliation will continue after all the data nodes are ready.")
		return finishProcessing()
	}

	if initialSystemRestart && !statefulsetUpdateComplete(sc.dataNodeSfSet) {
		if !workloadHasConfigGeneration(sc.dataNodeSfSet, sc.configSummary.NdbClusterGeneration) {
			// The user updated the NdbCluster spec and the StatefulSet needs to be patched
			klog.Infof("A new generation of NdbCluster spec exists and the statefulset %q needs to be updated", getNamespacedName(sc.dataNodeSfSet))
			if sr := sc.reconcileDataNodeStatefulSet(ctx); sr.stopSync() {
				return sr
			}
		}
		// Data nodes are starting for the first time, and some of
		// them are not ready yet, which implies that this
		// reconciliation was triggered only to update the NdbCluster
		// status. No need to proceed further.
		return finishProcessing()
	}

	// MySQL Server StatefulSet will be created only if required.
	// For now, just verify that if it exists, it is indeed owned by the NdbCluster resource.
	if sc.mysqldSfset, err = sc.validateMySQLServerStatefulSet(); err != nil {
		return errorWhileProcessing(err)
	}
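	// sc.mysqldSfset will be nil at this point if the MySQL Server
	// StatefulSet has not been created yet.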

	if sc.mysqldSfset != nil && !statefulsetUpdateComplete(sc.mysqldSfset) && !nc.HasSyncError() {
		// MySQL Server StatefulSet exists, but it is not complete yet,
		// which implies that this reconciliation was triggered only
		// to update the NdbCluster status. No need to proceed further.
		return finishProcessing()
	}

	// The StatefulSets already existed before this sync loop.
	// There is a rare chance that some of the other resources were created
	// during this sync loop because they had been deleted by an application
	// other than the operator. We can still continue processing in that case,
	// as those resources become ready almost immediately.
	klog.Info("All resources exist")
	return continueProcessing()
}
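
The readiness and generation checks above rely on two helpers defined outside this range, statefulsetUpdateComplete and workloadHasConfigGeneration. Below is a minimal sketch of what they might look like, assuming the operator stamps the NdbCluster generation onto the workload as an annotation; the annotation key, the exact status fields checked, and the int64 generation parameter are assumptions, not the operator's confirmed implementation.

package controllers

import (
	"strconv"

	appsv1 "k8s.io/api/apps/v1"
)

// Hypothetical annotation key; the operator's actual key may differ.
const ndbClusterGenerationAnnotation = "mysql.oracle.com/ndb-cluster-generation"

// statefulsetUpdateComplete reports whether the StatefulSet controller has
// observed the latest spec and all the replicas are updated and ready.
func statefulsetUpdateComplete(sfset *appsv1.StatefulSet) bool {
	replicas := int32(1)
	if sfset.Spec.Replicas != nil {
		replicas = *sfset.Spec.Replicas
	}
	return sfset.Status.ObservedGeneration == sfset.Generation &&
		sfset.Status.UpdatedReplicas == replicas &&
		sfset.Status.ReadyReplicas == replicas
}

// workloadHasConfigGeneration reports whether the generation recorded on the
// workload's annotations matches the given NdbCluster generation.
func workloadHasConfigGeneration(sfset *appsv1.StatefulSet, generation int64) bool {
	annotated, ok := sfset.GetAnnotations()[ndbClusterGenerationAnnotation]
	return ok && annotated == strconv.FormatInt(generation, 10)
}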