in controllers/solr_cluster_ops_util.go [36:87]
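// determineScaleClusterOpLockIfNecessary compares the desired pod count on the SolrCloud spec
// with the pod count configured on the StatefulSet. If they differ, it either acquires a
// scale-down or scale-up cluster-op lock (via StatefulSet annotations) to start a managed
// scaling operation, or scales the cloud in an unmanaged fashion. It reports whether the lock
// was acquired and, when the pod list is not yet in sync with the StatefulSet, a duration
// after which to retry.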
func determineScaleClusterOpLockIfNecessary(ctx context.Context, r *SolrCloudReconciler, instance *solrv1beta1.SolrCloud, statefulSet *appsv1.StatefulSet, podList []corev1.Pod, logger logr.Logger) (clusterLockAcquired bool, retryLaterDuration time.Duration, err error) {
	desiredPods := int(*instance.Spec.Replicas)
	configuredPods := int(*statefulSet.Spec.Replicas)
	if desiredPods != configuredPods {
		// We do not do a "managed" scale-to-zero operation.
		// Only do a managed scale-down if desiredPods is positive.
		// The VacatePodsOnScaleDown option is enabled by default, so treat "nil" like "true".
		if desiredPods < configuredPods && desiredPods > 0 &&
			(instance.Spec.Scaling.VacatePodsOnScaleDown == nil || *instance.Spec.Scaling.VacatePodsOnScaleDown) {
			if len(podList) > configuredPods {
				// There are too many pods; the StatefulSet controller has yet to delete the unwanted pods.
				// Do not start the scale-down until these extra pods are deleted.
				return false, time.Second * 5, nil
			}
			// Managed scale down!
			originalStatefulSet := statefulSet.DeepCopy()
			statefulSet.Annotations[util.ClusterOpsLockAnnotation] = util.ScaleDownLock
			// The scaleDown metadata is the number of nodes to scale down to.
			// We only support scaling down one pod at a time when using a managed scale-down.
			// If the user wishes to scale down by multiple nodes, this ClusterOp will be done once per node.
			statefulSet.Annotations[util.ClusterOpsMetadataAnnotation] = strconv.Itoa(configuredPods - 1)
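			// Patching the annotations below is what actually takes the cluster-op lock;
			// if the patch fails, the lock is not acquired and the error is returned.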
			if err = r.Patch(ctx, statefulSet, client.StrategicMergeFrom(originalStatefulSet)); err != nil {
				logger.Error(err, "Error while patching StatefulSet to start clusterOp", "clusterOp", util.ScaleDownLock, "clusterOpMetadata", configuredPods-1)
			} else {
				clusterLockAcquired = true
			}
		} else if desiredPods > configuredPods && (instance.Spec.Scaling.PopulatePodsOnScaleUp == nil || *instance.Spec.Scaling.PopulatePodsOnScaleUp) {
			if len(podList) < configuredPods {
				// There are not enough pods; the StatefulSet controller has yet to create the previously desired pods.
				// Do not start the scale-up until these missing pods are created.
				return false, time.Second * 5, nil
			}
			// Managed scale up!
			originalStatefulSet := statefulSet.DeepCopy()
			statefulSet.Annotations[util.ClusterOpsLockAnnotation] = util.ScaleUpLock
			// The scaleUp metadata is the number of nodes that existed before the scaleUp.
			// This allows the scaleUp operation to know which pods will be empty after the statefulSet is scaled up.
			statefulSet.Annotations[util.ClusterOpsMetadataAnnotation] = strconv.Itoa(configuredPods)
			// We want to set the number of replicas at the beginning of the scaleUp operation.
			statefulSet.Spec.Replicas = pointer.Int32(int32(desiredPods))
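			// The replica bump and the lock/metadata annotations go out in the same patch below,
			// so the StatefulSet only grows together with the recorded scale-up lock.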
			if err = r.Patch(ctx, statefulSet, client.StrategicMergeFrom(originalStatefulSet)); err != nil {
				logger.Error(err, "Error while patching StatefulSet to start clusterOp", "clusterOp", util.ScaleUpLock, "clusterOpMetadata", configuredPods, "newStatefulSetSize", desiredPods)
			} else {
				clusterLockAcquired = true
			}
		} else {
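			// Unmanaged scaling (e.g. scale-to-zero, or when the managed scaling options are disabled):
			// scale the cloud without taking a cluster-op lock.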
			err = scaleCloudUnmanaged(ctx, r, statefulSet, desiredPods, logger)
		}
	}
	return
}
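
// Illustrative only: a minimal sketch of how a caller in the reconcile loop might consume the
// returned values. Names other than determineScaleClusterOpLockIfNecessary (requeueOrNot, etc.)
// are assumptions for this example, not part of this file.
//
//	lockAcquired, retryLater, err := determineScaleClusterOpLockIfNecessary(ctx, r, instance, statefulSet, podList, logger)
//	if err != nil {
//		return requeueOrNot, err
//	}
//	if retryLater > 0 {
//		// The pod list is not yet in sync with the StatefulSet; check again shortly.
//		requeueOrNot.RequeueAfter = retryLater
//	}
//	if lockAcquired {
//		// A scale-down/scale-up cluster operation has been recorded on the StatefulSet annotations.
//	}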