in pkg/controller/clusters/status.go [202:312]
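// updateWorkersReadyCondition updates the WorkersReady condition on the cluster
// by aggregating the replica state of the worker node MachineDeployments.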
func updateWorkersReadyCondition(cluster *anywherev1.Cluster, machineDeployments []clusterv1.MachineDeployment) {
	initializedCondition := conditions.Get(cluster, anywherev1.ControlPlaneInitializedCondition)
	// conditions.Get returns nil if the condition has not been set yet, so guard
	// against a nil dereference and compare against the typed constant rather than
	// a raw string.
	if initializedCondition == nil || initializedCondition.Status != v1.ConditionTrue {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, anywherev1.ControlPlaneNotInitializedReason, clusterv1.ConditionSeverityInfo, "")
return
}
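	// Compute the expected number of worker nodes from the cluster spec, and track
	// which worker node groups are autoscaled so their replica counts can be
	// validated against min/max bounds instead of a fixed count.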
totalExpected := 0
wngWithAutoScalingConfigurationMap := make(map[string]anywherev1.AutoScalingConfiguration)
for _, wng := range cluster.Spec.WorkerNodeGroupConfigurations {
		// Only worker node groups without an autoscaling configuration count
		// toward the expected number of worker nodes.
if wng.AutoScalingConfiguration == nil {
totalExpected += *wng.Count
} else {
wngWithAutoScalingConfigurationMap[wng.Name] = *wng.AutoScalingConfiguration
}
}
// First, we need to aggregate the number of nodes across worker node groups to be able to assess the condition of the workers
// as a whole.
totalReadyReplicas := 0
totalUpdatedReplicas := 0
totalReplicas := 0
for _, md := range machineDeployments {
		// Make sure the machine deployment status is up to date before relying on it.
if md.Status.ObservedGeneration != md.ObjectMeta.Generation {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, anywherev1.OutdatedInformationReason, clusterv1.ConditionSeverityInfo, "Worker node group %s status not up to date yet", md.Name)
return
}
		// Skip replica counting for machine deployments that carry the autoscaling
		// annotation: their replica counts are managed by the autoscaler and are
		// validated against the min/max bounds further below. Indexing a nil map is
		// safe in Go, so no explicit nil check on the annotations is needed.
		if _, ok := md.Annotations[clusterapi.NodeGroupMinSizeAnnotation]; ok {
			continue
		}
totalReadyReplicas += int(md.Status.ReadyReplicas)
totalUpdatedReplicas += int(md.Status.UpdatedReplicas)
totalReplicas += int(md.Status.Replicas)
}
// There may be worker nodes that are not up to date yet in the case of a rolling upgrade,
// so reflect that on the condition with an appropriate message.
totalOutdated := totalReplicas - totalUpdatedReplicas
if totalOutdated > 0 {
upgradeReason := anywherev1.RollingUpgradeInProgress
		// The control plane configuration is checked here because validation already
		// guarantees that all the machine groups share the same upgrade strategy.
		if cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil &&
			cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.Type == anywherev1.InPlaceStrategyType {
			upgradeReason = anywherev1.InPlaceUpgradeInProgress
		}
		conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, upgradeReason, clusterv1.ConditionSeverityInfo, "Worker nodes not up-to-date yet, %d upgrading (%d up to date)", totalOutdated, totalUpdatedReplicas)
return
}
	// If the worker node replicas need to be scaled up.
if totalReplicas < totalExpected {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, %d expected (%d actual)", totalExpected, totalReplicas)
return
}
	// If the worker node replicas need to be scaled down.
if totalReplicas > totalExpected {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, anywherev1.ScalingDownReason, clusterv1.ConditionSeverityInfo, "Scaling down worker nodes, %d expected (%d actual)", totalExpected, totalReplicas)
return
}
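	// The replicas are at the expected count and fully up to date; verify that
	// they are all ready as well.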
if totalReadyReplicas != totalExpected {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, anywherev1.NodesNotReadyReason, clusterv1.ConditionSeverityInfo, "Worker nodes not ready yet, %d expected (%d ready)", totalExpected, totalReadyReplicas)
return
}
	// Iterate through the machine deployments with autoscaling configured and check
	// that each one's replica count is between the min and max counts specified in
	// the cluster spec.
for _, md := range machineDeployments {
		if autoscaling, exists := wngWithAutoScalingConfigurationMap[md.ObjectMeta.Name]; exists {
			minCount := autoscaling.MinCount
			maxCount := autoscaling.MaxCount
replicas := int(md.Status.Replicas)
if replicas < minCount || replicas > maxCount {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, anywherev1.AutoscalerConstraintNotMetReason, clusterv1.ConditionSeverityInfo, "Worker nodes count for %s not between %d and %d yet (%d actual)", md.ObjectMeta.Name, minCount, maxCount, replicas)
return
}
}
}
	// Check the Ready condition on each machine deployment as a final validation.
	// The machine deployments should normally be ready at this point; if one is
	// not, report it as an error.
for _, md := range machineDeployments {
		mdConditions := md.GetConditions()
		// Ranging over a nil slice is a no-op, so no explicit nil check is needed.
		var machineDeploymentReadyCondition *clusterv1.Condition
		for i := range mdConditions {
			// Take the address of the slice element rather than of the loop
			// variable: the loop variable is reused on every iteration, so
			// &condition would end up pointing at whichever condition happened
			// to be examined last, not necessarily the Ready condition.
			if mdConditions[i].Type == clusterv1.ReadyCondition {
				machineDeploymentReadyCondition = &mdConditions[i]
				break
			}
		}
if machineDeploymentReadyCondition != nil && machineDeploymentReadyCondition.Status == v1.ConditionFalse {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, anywherev1.MachineDeploymentNotReadyReason, clusterv1.ConditionSeverityError, "Machine deployment %s not ready yet", md.ObjectMeta.Name)
return
}
}
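	// All the checks above passed, so the workers can be marked ready.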
conditions.MarkTrue(cluster, anywherev1.WorkersReadyCondition)
}
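
// A minimal sketch of how this condition logic might be exercised in a unit
// test. It assumes gomega (NewWithT, Equal, BeNil, dot-imported) for assertions
// plus the metav1 and clusterv1 types used above; the test name and the
// scenario are illustrative, not part of the original file.
func TestUpdateWorkersReadyConditionScalingUp(t *testing.T) {
	g := NewWithT(t)

	count := 3
	cluster := &anywherev1.Cluster{
		Spec: anywherev1.ClusterSpec{
			WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
				{Name: "md-0", Count: &count},
			},
		},
	}
	// updateWorkersReadyCondition short-circuits unless ControlPlaneInitialized is True.
	conditions.MarkTrue(cluster, anywherev1.ControlPlaneInitializedCondition)

	md := clusterv1.MachineDeployment{
		ObjectMeta: metav1.ObjectMeta{Name: "md-0", Generation: 1},
		Status: clusterv1.MachineDeploymentStatus{
			ObservedGeneration: 1,
			Replicas:           2, // fewer than the 3 expected, so ScalingUp should be reported
			ReadyReplicas:      2,
			UpdatedReplicas:    2,
		},
	}

	updateWorkersReadyCondition(cluster, []clusterv1.MachineDeployment{md})

	workersReady := conditions.Get(cluster, anywherev1.WorkersReadyCondition)
	g.Expect(workersReady).NotTo(BeNil())
	g.Expect(workersReady.Status).To(Equal(v1.ConditionFalse))
	g.Expect(workersReady.Reason).To(Equal(anywherev1.ScalingUpReason))
}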