in pkg/operations/kubernetesupgrade/upgrader.go [174:330]
// upgradeMasterNodes upgrades the cluster's master VMs to the target
// orchestrator version. It generates an ARM upgrade template, normalizes it
// for the master-upgrade scenario (removing jumpbox/KMS resources and
// immutable properties as needed), recreates any master VMs that are missing
// due to a previously failed upgrade, and then deletes, recreates, and
// validates each not-yet-upgraded master in place, one at a time.
// Returns nil immediately when the cluster has no master profile.
func (ku *Upgrader) upgradeMasterNodes(ctx context.Context) error {
	if ku.ClusterTopology.DataModel.Properties.MasterProfile == nil {
		return nil
	}
	ku.logger.Infof("Master nodes StorageProfile: %s", ku.ClusterTopology.DataModel.Properties.MasterProfile.StorageProfile)
	// Upgrade Master VMs
	templateMap, parametersMap, err := ku.generateUpgradeTemplate(ku.ClusterTopology.DataModel, ku.AKSEngineVersion)
	if err != nil {
		return ku.Translator.Errorf("error generating upgrade template: %s", err.Error())
	}
	ku.logger.Infof("Prepping master nodes for upgrade...")
	transformer := &transform.Transformer{
		Translator: ku.Translator,
	}
	// Strip template resources that must not be redeployed during a master
	// upgrade: the private jumpbox, SLB-specific resources, and KMS resources.
	if ku.ClusterTopology.DataModel.Properties.OrchestratorProfile.KubernetesConfig.PrivateJumpboxProvision() {
		err = transformer.RemoveJumpboxResourcesFromTemplate(ku.logger, templateMap)
		if err != nil {
			return ku.Translator.Errorf("error removing jumpbox resources from template: %s", err.Error())
		}
	}
	if ku.DataModel.Properties.OrchestratorProfile.KubernetesConfig.LoadBalancerSku == api.StandardLoadBalancerSku {
		err = transformer.NormalizeForK8sSLBScalingOrUpgrade(ku.logger, templateMap)
		if err != nil {
			return ku.Translator.Errorf("error normalizing upgrade template for SLB: %s", err.Error())
		}
	}
	if to.Bool(ku.DataModel.Properties.OrchestratorProfile.KubernetesConfig.EnableEncryptionWithExternalKms) {
		err = transformer.RemoveKMSResourcesFromTemplate(ku.logger, templateMap)
		if err != nil {
			return ku.Translator.Errorf("error removing KMS resources from template: %s", err.Error())
		}
	}
	if err = transformer.NormalizeResourcesForK8sMasterUpgrade(ku.logger, templateMap, ku.DataModel.Properties.MasterProfile.IsManagedDisks(), nil); err != nil {
		ku.logger.Error(err.Error())
		return err
	}
	transformer.RemoveImmutableResourceProperties(ku.logger, templateMap)
	// One UpgradeMasterNode is reused for every create/validate/delete step;
	// only the VM name/index varies per iteration.
	upgradeMasterNode := UpgradeMasterNode{
		Translator: ku.Translator,
		logger:     ku.logger,
	}
	upgradeMasterNode.TemplateMap = templateMap
	upgradeMasterNode.ParametersMap = parametersMap
	upgradeMasterNode.UpgradeContainerService = ku.ClusterTopology.DataModel
	upgradeMasterNode.ResourceGroup = ku.ClusterTopology.ResourceGroup
	upgradeMasterNode.SubscriptionID = ku.ClusterTopology.SubscriptionID
	upgradeMasterNode.Client = ku.Client
	upgradeMasterNode.kubeConfig = ku.kubeConfig
	if ku.stepTimeout == nil {
		upgradeMasterNode.timeout = defaultTimeout
	} else {
		upgradeMasterNode.timeout = *ku.stepTimeout
	}
	expectedMasterCount := ku.ClusterTopology.DataModel.Properties.MasterProfile.Count
	mastersUpgradedCount := len(*ku.ClusterTopology.UpgradedMasterVMs)
	mastersToUpgradeCount := expectedMasterCount - mastersUpgradedCount
	ku.logger.Infof("Total expected master count: %d", expectedMasterCount)
	ku.logger.Infof("Master nodes that need to be upgraded: %d", mastersToUpgradeCount)
	ku.logger.Infof("Master nodes that have been upgraded: %d", mastersUpgradedCount)
	ku.logger.Infof("Starting upgrade of master nodes...")
	masterNodesInCluster := len(*ku.ClusterTopology.MasterVMs) + mastersUpgradedCount
	ku.logger.Infof("masterNodesInCluster: %d", masterNodesInCluster)
	// More masters than expected is unrecoverable here: we cannot tell which
	// VM is spurious, so fail rather than delete the wrong one.
	if masterNodesInCluster > expectedMasterCount {
		return ku.Translator.Errorf("Total count of master VMs: %d exceeded expected count: %d", masterNodesInCluster, expectedMasterCount)
	}
	// This condition is possible if the previous upgrade operation failed during master
	// VM upgrade when a master VM was deleted but creation of upgraded master did not run.
	if masterNodesInCluster < expectedMasterCount {
		ku.logger.Infof(
			"Found missing master VMs in the cluster. Reconstructing names of missing master VMs for recreation during upgrade...")
	}
	// Record which master name indexes currently exist so that missing
	// indexes (gaps) can be recreated below.
	existingMastersIndex := make(map[int]bool)
	for _, vm := range *ku.ClusterTopology.MasterVMs {
		// NOTE(review): GetVMNameIndex error is deliberately ignored here and
		// below — a parse failure would map the VM to index 0. Verify inputs
		// are always well-formed VM names.
		masterIndex, _ := utils.GetVMNameIndex(*vm.Properties.StorageProfile.OSDisk.OSType, *vm.Name)
		existingMastersIndex[masterIndex] = true
	}
	mastersToCreate := expectedMasterCount - masterNodesInCluster
	ku.logger.Infof("Expected master count: %d, Creating %d more master VMs", expectedMasterCount, mastersToCreate)
	// NOTE: this is NOT completely idempotent because it assumes that
	// the OS disk has been deleted
	for i := 0; i < mastersToCreate; i++ {
		// Pick the lowest index not already occupied by an existing master.
		masterIndexToCreate := 0
		for existingMastersIndex[masterIndexToCreate] {
			masterIndexToCreate++
		}
		ku.logger.Infof("Creating upgraded master VM with index: %d", masterIndexToCreate)
		err = upgradeMasterNode.CreateNode(ctx, "master", masterIndexToCreate)
		if err != nil {
			ku.logger.Infof("Error creating upgraded master VM with index: %d", masterIndexToCreate)
			return err
		}
		tempVMName := ""
		err = upgradeMasterNode.Validate(&tempVMName)
		if err != nil {
			ku.logger.Infof("Error validating upgraded master VM with index: %d", masterIndexToCreate)
			return err
		}
		existingMastersIndex[masterIndexToCreate] = true
	}
	// Index the already-upgraded masters so they are skipped (informational
	// only; the loop below iterates MasterVMs, which excludes them).
	upgradedMastersIndex := make(map[int]bool)
	for _, vm := range *ku.ClusterTopology.UpgradedMasterVMs {
		ku.logger.Infof("Master VM: %s is upgraded to expected orchestrator version", *vm.Name)
		masterIndex, _ := utils.GetVMNameIndex(*vm.Properties.StorageProfile.OSDisk.OSType, *vm.Name)
		upgradedMastersIndex[masterIndex] = true
	}
	// Rolling upgrade: delete each remaining old master, recreate it at the
	// same index from the upgrade template, then validate before moving on.
	for _, vm := range *ku.ClusterTopology.MasterVMs {
		ku.logger.Infof("Upgrading Master VM: %s", *vm.Name)
		masterIndex, _ := utils.GetVMNameIndex(*vm.Properties.StorageProfile.OSDisk.OSType, *vm.Name)
		err = upgradeMasterNode.DeleteNode(vm.Name, false)
		if err != nil {
			ku.logger.Infof("Error deleting master VM: %s, err: %v", *vm.Name, err)
			return err
		}
		err = upgradeMasterNode.CreateNode(ctx, "master", masterIndex)
		if err != nil {
			ku.logger.Infof("Error creating upgraded master VM: %s", *vm.Name)
			return err
		}
		err = upgradeMasterNode.Validate(vm.Name)
		if err != nil {
			ku.logger.Infof("Error validating upgraded master VM: %s", *vm.Name)
			return err
		}
		upgradedMastersIndex[masterIndex] = true
	}
	return nil
}