in pkg/backend/openshiftcluster.go [94:214]
func (ocb *openShiftClusterBackend) handle(ctx context.Context, log *logrus.Entry, doc *api.OpenShiftClusterDocument, monitorDeleteWaitTimeSec int) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
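	// Hold the backend lease on this document for the duration of the
	// operation. The cancel func is handed to the heartbeat so that it can
	// abort the in-flight work if needed; stop halts the heartbeat and is
	// also deferred to cover early returns.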
	stop := ocb.heartbeat(ctx, cancel, log, doc)
	defer stop()
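	// The document ID is an ARM resource ID; parse it to recover the
	// subscription ID so the subscription document can be fetched.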
	r, err := azure.ParseResourceID(doc.OpenShiftCluster.ID)
	if err != nil {
		return err
	}

	subscriptionDoc, err := ocb.dbSubscriptions.Get(ctx, r.SubscriptionID)
	if err != nil {
		return err
	}
	// Only attempt to access Hive if we are installing via Hive or adopting
	// clusters.
	installViaHive, err := ocb.env.LiveConfig().InstallViaHive(ctx)
	if err != nil {
		return err
	}

	adoptViaHive, err := ocb.env.LiveConfig().AdoptByHive(ctx)
	if err != nil {
		return err
	}

	var hr hive.ClusterManager
	if installViaHive || adoptViaHive {
		hiveShard := 1
		hiveRestConfig, err := ocb.env.LiveConfig().HiveRestConfig(ctx, hiveShard)
		if err != nil {
			return fmt.Errorf("failed getting RESTConfig for Hive shard %d: %w", hiveShard, err)
		}

		hr, err = hive.NewFromConfigClusterManager(log, ocb.env, hiveRestConfig)
		if err != nil {
			return fmt.Errorf("failed creating HiveClusterManager: %w", err)
		}
	}
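	// Note: hr remains nil when neither install nor adoption via Hive is
	// enabled, and is passed to newManager as such.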
	m, err := ocb.newManager(ctx, log, ocb.env, ocb.dbOpenShiftClusters, ocb.dbGateway, ocb.dbOpenShiftVersions, ocb.dbPlatformWorkloadIdentityRoleSets, ocb.aead, ocb.billing, doc, subscriptionDoc, hr, ocb.m)
	if err != nil {
		return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateFailed, api.ProvisioningStateFailed, err)
	}
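	// Dispatch on the document's provisioning state. Each arm ends the lease
	// with the resulting state so that, where work remains, another backend
	// worker can pick it up.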
	switch doc.OpenShiftCluster.Properties.ProvisioningState {
	case api.ProvisioningStateCreating:
		log.Print("creating")
		err = m.Install(ctx)
		if err != nil {
			return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateCreating, api.ProvisioningStateFailed, err)
		}

		// Re-get the document and check its state:
		// if Install == nil, the install is complete;
		// if Install != nil, terminate, release the lease, and let another
		// backend worker pick up the next install phase.
		doc, err = ocb.dbOpenShiftClusters.Get(ctx, strings.ToLower(doc.OpenShiftCluster.ID))
		if err != nil {
			return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateCreating, api.ProvisioningStateFailed, err)
		}

		if doc.OpenShiftCluster.Properties.Install == nil {
			return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateCreating, api.ProvisioningStateSucceeded, nil)
		}
		return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateCreating, api.ProvisioningStateCreating, nil)
	case api.ProvisioningStateAdminUpdating:
		log.Printf("admin updating (type: %s)", doc.OpenShiftCluster.Properties.MaintenanceTask)
		err = m.AdminUpdate(ctx)
		if err != nil {
			// The customer will continue to see the cluster in an ongoing
			// maintenance state.
			return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateAdminUpdating, api.ProvisioningStateFailed, err)
		}

		// The maintenance task is complete, so clear the maintenance state.
		doc, err = ocb.setNoMaintenanceState(ctx, doc)
		if err != nil {
			return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateAdminUpdating, api.ProvisioningStateFailed, err)
		}
		return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateAdminUpdating, api.ProvisioningStateSucceeded, nil)
	case api.ProvisioningStateUpdating:
		log.Print("updating")
		err = m.Update(ctx)
		if err != nil {
			return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateUpdating, api.ProvisioningStateFailed, err)
		}
		return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateUpdating, api.ProvisioningStateSucceeded, nil)

	case api.ProvisioningStateDeleting:
		log.Print("deleting")
		t := time.Now()
		err = m.Delete(ctx)
		if err != nil {
			return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateDeleting, api.ProvisioningStateFailed, err)
		}
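		// Mark the async operation as succeeded before releasing the lease
		// and removing the cluster document.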
		err = ocb.updateAsyncOperation(ctx, log, doc.AsyncOperationID, nil, api.ProvisioningStateSucceeded, "", nil)
		if err != nil {
			return ocb.endLease(ctx, log, stop, doc, api.ProvisioningStateDeleting, api.ProvisioningStateFailed, err)
		}

		stop()

		// This Sleep ensures that the monitor has enough time to capture the
		// deletion (by reading from the changefeed) and stop monitoring the
		// cluster.
		// TODO: Provide better communication between RP and Monitor
		time.Sleep(time.Until(t.Add(time.Second * time.Duration(monitorDeleteWaitTimeSec))))
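		// Finally remove the cluster document itself, then log the async
		// operation result and emit terminal metrics before returning.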
		err := ocb.dbOpenShiftClusters.Delete(ctx, doc)
		ocb.asyncOperationResultLog(log, doc, doc.OpenShiftCluster.Properties.ProvisioningState, err)
		ocb.emitMetrics(log, doc, api.ProvisioningStateDeleting, api.ProvisioningStateFailed, err)
		ocb.emitProvisioningMetrics(doc, api.ProvisioningStateFailed)
		return err
	}
return fmt.Errorf("unexpected provisioningState %q", doc.OpenShiftCluster.Properties.ProvisioningState)
}
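
For context, a minimal sketch of the lease-heartbeat pattern this handler leans on (stop/defer stop plus a cancel func handed to the heartbeat). The renewLease callback and the 10-second interval are assumptions for illustration, not the actual ocb.heartbeat implementation; assumes "context", "sync", and "time" are imported.

// Sketch only: a goroutine renews the lease and cancels the operation's
// context if renewal fails. The returned stop func is safe to call twice
// (once explicitly, once via defer), mirroring handle's usage above.
func heartbeatSketch(ctx context.Context, cancel context.CancelFunc, renewLease func(context.Context) error) (stop func()) {
	done := make(chan struct{})
	var once sync.Once

	go func() {
		t := time.NewTicker(10 * time.Second) // assumed renewal interval
		defer t.Stop()
		for {
			select {
			case <-t.C:
				if err := renewLease(ctx); err != nil {
					cancel() // lease lost: abort the in-flight operation
					return
				}
			case <-done:
				return
			}
		}
	}()

	return func() { once.Do(func() { close(done) }) }
}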