in oracle/controllers/instancecontroller/instance_controller.go [100:394]
// Reconcile drives an Oracle Instance toward its declared spec. It dispatches,
// in priority order: deletion, stop/restart, finalizer setup, pause mode,
// standby DR, parameter updates, dNFS toggling, patching, restore, resize,
// initial provisioning (ConfigMap, StatefulSet, load balancer, services),
// standby promotion, and finally database-instance reconciliation.
//
// Status mutations made anywhere below are flushed back to the API server by
// a single deferred Status().Update; if the reconcile itself succeeded but the
// status writeback failed, the writeback error is surfaced so the request is
// retried.
func (r *InstanceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, respErr error) {
	// Bound the whole pass so a stuck downstream call cannot wedge the worker.
	ctx, cancel := context.WithTimeout(ctx, reconcileTimeout)
	defer cancel()

	log := r.Log.WithValues("Instance", req.NamespacedName)
	log.Info("reconciling instance")

	var inst v1alpha1.Instance
	if err := r.Get(ctx, req.NamespacedName, &inst); err != nil {
		// NotFound means the Instance was deleted; nothing left to do.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	if err := validateSpec(&inst); err != nil {
		log.Error(err, "instance spec validation failed")
		// TODO better error handling, no need retry
		return ctrl.Result{}, nil
	}

	// Single status writeback for all mutations below. Preserve an earlier
	// reconcile error; only adopt the update error when there was none.
	defer func() {
		r.updateIsChangeApplied(&inst, log)
		if err := r.Status().Update(ctx, &inst); err != nil {
			log.Error(err, "failed to update the instance status")
			if respErr == nil {
				respErr = err
			}
		}
	}()

	instanceReadyCond := k8s.FindCondition(inst.Status.Conditions, k8s.Ready)
	if IsDeleting(&inst) {
		return r.reconcileInstanceDeletion(ctx, req, log)
	} else if IsStopped(&inst) && !k8s.ConditionReasonEquals(instanceReadyCond, k8s.InstanceStopped) {
		return r.reconcileInstanceStop(ctx, req, log)
	} else if !IsStopped(&inst) && k8s.ConditionReasonEquals(instanceReadyCond, k8s.InstanceStopped) {
		// Previously stopped and the spec now asks for it to run again.
		k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.CreateInProgress, "Restarting Instance")
	}

	// Add finalizer to clean up underlying objects in case of deletion.
	if !controllerutil.ContainsFinalizer(&inst, controllers.FinalizerName) {
		log.Info("adding a finalizer to the Instance object.")
		controllerutil.AddFinalizer(&inst, controllers.FinalizerName)
		if err := r.Update(ctx, &inst); err != nil {
			return ctrl.Result{}, err
		}
	}

	diskSpace, err := commonutils.DiskSpaceTotal(&inst)
	if err != nil {
		// Non-fatal: the total is only logged for observability.
		log.Error(err, "failed to calculate the total disk space")
	}
	log.Info("common instance", "total allocated disk space across all instance disks [Gi]", diskSpace/1024/1024/1024)

	instanceReadyCond = k8s.FindCondition(inst.Status.Conditions, k8s.Ready)
	dbInstanceCond := k8s.FindCondition(inst.Status.Conditions, k8s.DatabaseInstanceReady)

	if inst.Spec.Mode == commonv1alpha1.Pause {
		if instanceReadyCond == nil || dbInstanceCond == nil || instanceReadyCond.Reason != k8s.CreateComplete {
			log.Info("Ignoring pause mode since only instances in a stable state can be paused.")
		} else {
			r.InstanceLocks.Store(fmt.Sprintf("%s-%s", inst.Namespace, inst.Name), true)
			k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.PauseMode, "Instance switched to pause mode")
			log.Info("Instance has been set to pause for further reconciliation reset the pause mode")
			return ctrl.Result{}, nil
		}
	}

	// Collect the services the spec enables; passed through to the STS params.
	var enabledServices []commonv1alpha1.Service
	for service, enabled := range inst.Spec.Services {
		if enabled {
			enabledServices = append(enabledServices, service)
		}
	}

	// If the instance is ready and DR enabled, we can set up standby DR.
	if k8s.ConditionReasonEquals(instanceReadyCond, k8s.StandbyDRInProgress) && isStandbyDR(&inst) {
		return r.standbyStateMachine(ctx, &inst, log)
	}

	if result, err := r.parameterUpdateStateMachine(ctx, req, inst, log); err != nil {
		return result, err
	}

	// If the instance and database is ready, we can set the instance parameters
	if k8s.ConditionStatusEquals(instanceReadyCond, v1.ConditionTrue) &&
		k8s.ConditionStatusEquals(dbInstanceCond, v1.ConditionTrue) && (inst.Spec.EnableDnfs != inst.Status.DnfsEnabled) {
		log.Info("instance and db is ready, modifying dNFS")
		if err := r.setDnfs(ctx, inst, inst.Spec.EnableDnfs); err != nil {
			return ctrl.Result{}, err
		}
		inst.Status.DnfsEnabled = inst.Spec.EnableDnfs
		if inst.Status.DnfsEnabled {
			log.Info("dNFS successfully enabled")
		} else {
			log.Info("dNFS successfully disabled")
		}
	}

	// Re-read the conditions: the state machines above may have mutated them.
	instanceReadyCond = k8s.FindCondition(inst.Status.Conditions, k8s.Ready)
	dbInstanceCond = k8s.FindCondition(inst.Status.Conditions, k8s.DatabaseInstanceReady)

	// Load default preferences (aka "config") if provided by a customer.
	config, err := r.loadConfig(ctx, req.NamespacedName.Namespace)
	if err != nil {
		return ctrl.Result{}, err
	}

	images := CloneMap(r.Images)
	if err := r.overrideDefaultImages(config, images, &inst, log); err != nil {
		return ctrl.Result{}, err
	}

	applyOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner("instance-controller")}

	cm, err := controllers.NewConfigMap(&inst, r.Scheme(), fmt.Sprintf(controllers.CmName, inst.Name))
	if err != nil {
		log.Error(err, "failed to create a ConfigMap", "cm", cm)
		return ctrl.Result{}, err
	}
	if err := r.Patch(ctx, cm, client.Apply, applyOpts...); err != nil {
		return ctrl.Result{}, err
	}

	// Create a StatefulSet if needed.
	sp := controllers.StsParams{
		Inst:           &inst,
		Scheme:         r.Scheme(),
		Namespace:      req.NamespacedName.Namespace,
		Images:         images,
		SvcName:        fmt.Sprintf(controllers.SvcName, inst.Name),
		StsName:        fmt.Sprintf(controllers.StsName, inst.Name),
		PrivEscalation: false,
		ConfigMap:      cm,
		Disks:          controllers.DiskSpecs(&inst, config),
		Config:         config,
		Log:            log,
		Services:       enabledServices,
	}

	// Enter (or resume) the patching state machine when the image set changed
	// or a previous pass left the machine active.
	if IsPatchingStateMachineEntryCondition(inst.Spec.Services, inst.Status.ActiveImages, sp.Images, inst.Status.LastFailedImages, instanceReadyCond, dbInstanceCond) ||
		inst.Status.CurrentActiveStateMachine == "PatchingStateMachine" {
		databasePatchingTimeout := DefaultStsPatchingTimeout
		if inst.Spec.DatabasePatchingTimeout != nil {
			databasePatchingTimeout = inst.Spec.DatabasePatchingTimeout.Duration
		}
		result, err, done := r.patchingStateMachine(req, instanceReadyCond, dbInstanceCond, &inst, ctx, &sp, config, databasePatchingTimeout, log)
		if err != nil {
			log.Error(err, "patchingStateMachine failed")
		}
		if done {
			return result, err
		}
	}

	// If there is a Restore section in the spec the reconciliation will be handled
	// by restore state machine until the Spec.Restore section is removed again.
	if inst.Spec.Restore != nil {
		// Ask the restore state machine to reconcile
		result, err := r.restoreStateMachine(req, instanceReadyCond, dbInstanceCond, &inst, ctx, sp, log)
		if err != nil {
			log.Error(err, "restoreStateMachine failed")
			return result, err
		}
		if !result.IsZero() {
			return result, err
		}
		// No error and no result - state machine is done, proceed with main reconciler
	}

	// A non-zero result from resize means we must requeue and retry later.
	res, err := r.handleResize(ctx, &inst, instanceReadyCond, dbInstanceCond, sp, applyOpts, log)
	if err != nil {
		return ctrl.Result{}, err
	} else if !res.IsZero() {
		return res, nil
	}

	if k8s.ConditionStatusEquals(instanceReadyCond, v1.ConditionTrue) && k8s.ConditionStatusEquals(dbInstanceCond, v1.ConditionTrue) {
		log.Info("instance has already been provisioned and ready")
		if res, err := r.reconcileMonitoring(ctx, &inst, log, images); err != nil || res.RequeueAfter > 0 {
			return res, err
		}
		return ctrl.Result{}, r.updateDatabaseIncarnationStatus(ctx, &inst, r.Log)
	}

	if result, err := r.createStatefulSet(ctx, &inst, sp, applyOpts, log); err != nil {
		return result, err
	}

	dbLoadBalancer, err := r.createDBLoadBalancer(ctx, &inst, applyOpts)
	if err != nil {
		return ctrl.Result{}, err
	}

	_, _, err = r.createDataplaneServices(ctx, inst, applyOpts)
	if err != nil {
		return ctrl.Result{}, err
	}

	if instanceReadyCond == nil {
		instanceReadyCond = k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.CreateInProgress, "")
	}

	inst.Status.Endpoint = fmt.Sprintf(controllers.SvcEndpoint, fmt.Sprintf(controllers.SvcName, inst.Name), inst.Namespace)
	inst.Status.URL = commonutils.LoadBalancerURL(dbLoadBalancer, consts.SecureListenerPort)

	// RequeueAfter 30 seconds to avoid constantly reconcile errors before statefulSet is ready.
	// Update status when the Service is ready (for the initial provisioning).
	// Also confirm that the StatefulSet is up and running.
	if k8s.ConditionReasonEquals(instanceReadyCond, k8s.CreateInProgress) {
		elapsed := k8s.ElapsedTimeFromLastTransitionTime(instanceReadyCond, time.Second)
		if elapsed > InstanceReadyTimeout {
			// Pass format and args to Eventf directly (never a pre-formatted
			// string) so '%' in the message cannot be re-interpreted as verbs.
			r.Recorder.Eventf(&inst, corev1.EventTypeWarning, "InstanceReady", "Instance provision timed out after %v", InstanceReadyTimeout)
			msg := fmt.Sprintf("Instance provision timed out. Elapsed Time: %v", elapsed)
			log.Info(msg)
			k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.CreateInProgress, msg)
			return ctrl.Result{}, nil
		}
		if !r.updateProgressCondition(ctx, inst, req.NamespacedName.Namespace, controllers.CreateInProgress, log) {
			log.Info("requeue after 30 seconds")
			return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
		}
		if inst.Status.URL != "" {
			if !k8s.ConditionReasonEquals(instanceReadyCond, k8s.CreateComplete) {
				r.Recorder.Eventf(&inst, corev1.EventTypeNormal, "InstanceReady", "Instance has been created successfully. Elapsed Time: %v", elapsed)
				k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionTrue, k8s.CreateComplete, "")
				inst.Status.ActiveImages = CloneMap(sp.Images)
				return ctrl.Result{Requeue: true}, nil
			}
		}
	}

	if inst.Labels == nil {
		inst.Labels = map[string]string{"instance": inst.Name}
		if err := r.Update(ctx, &inst); err != nil {
			log.Error(err, "failed to update the Instance spec (set labels)")
			return ctrl.Result{}, err
		}
	}

	if isStandbyDR(&inst) {
		k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.StandbyDRInProgress, "standby DR in progress")
		return ctrl.Result{Requeue: true}, nil
	}

	// When we reach here, the instance should be ready.
	if inst.Spec.Mode == commonv1alpha1.ManuallySetUpStandby {
		log.Info("reconciling instance for manually set up standby: DONE")
		// the code will return here, so we can rely on defer function to update database status.
		k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionFalse, k8s.ManuallySetUpStandbyInProgress, fmt.Sprintf("Setting up standby database in progress, remove spec.mode %v to promote the instance", inst.Spec.Mode))
		k8s.InstanceUpsertCondition(&inst.Status, k8s.StandbyReady, v1.ConditionTrue, k8s.CreateComplete, "standby instance creation complete, ready to set up standby database in the instance")
		return ctrl.Result{}, nil
	}

	if k8s.ConditionStatusEquals(k8s.FindCondition(inst.Status.Conditions, k8s.StandbyReady), v1.ConditionTrue) {
		// promote the standby instance, bootstrap is part of promotion.
		r.Recorder.Eventf(&inst, corev1.EventTypeNormal, k8s.PromoteStandbyInProgress, "")
		if err := r.bootstrapStandby(ctx, &inst); err != nil {
			r.Recorder.Eventf(&inst, corev1.EventTypeWarning, k8s.PromoteStandbyFailed, "Error promoting standby: %v", err)
			return ctrl.Result{}, err
		}
		// the standby instance has been successfully promoted, set ready condition
		// to true and standby ready to false. Promotion need to be idempotent to
		// ensure the correctness under retry.
		r.Recorder.Eventf(&inst, corev1.EventTypeNormal, k8s.PromoteStandbyComplete, "")
		k8s.InstanceUpsertCondition(&inst.Status, k8s.Ready, v1.ConditionTrue, k8s.CreateComplete, "")
		k8s.InstanceUpsertCondition(&inst.Status, k8s.DatabaseInstanceReady, v1.ConditionTrue, k8s.CreateComplete, "")
		k8s.InstanceUpsertCondition(&inst.Status, k8s.StandbyReady, v1.ConditionFalse, k8s.PromoteStandbyComplete, "")
		// Requeue so the now-ready instance takes the provisioned path above.
		// (Return an explicit nil: the outer err is guaranteed nil here.)
		return ctrl.Result{Requeue: true}, nil
	}

	// Best-effort: list sibling databases purely for diagnostic logging.
	var dbs v1alpha1.DatabaseList
	if err := r.List(ctx, &dbs, client.InNamespace(req.Namespace)); err != nil {
		log.V(1).Info("failed to list databases for instance", "inst.Name", inst.Name)
	} else {
		log.Info("list of queried databases", "dbs", dbs)
	}

	log.Info("instance status", "instanceReadyCond", instanceReadyCond, "endpoint", inst.Status.Endpoint,
		"url", inst.Status.URL, "databases", inst.Status.DatabaseNames)
	log.Info("reconciling instance: DONE")

	result, err := r.reconcileDatabaseInstance(ctx, &inst, r.Log, images)
	log.Info("reconciling database instance: DONE", "result", result, "err", err)
	return result, err
}