func (p *MixedTemplateNodeInfoProvider) Process()

in cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go [57:168]


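// Process builds a template NodeInfo for each node group, keyed by node group id.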
func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, ignoredTaints taints.TaintKeySet, now time.Time) (map[string]*schedulerframework.NodeInfo, errors.AutoscalerError) {
	// TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
	// TODO(mwielgus): Review error policy - sometimes we may continue with partial errors.
	result := make(map[string]*schedulerframework.NodeInfo)
	seenGroups := make(map[string]bool)

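	// Gather the scheduled pods for every node; they are used when building node templates below.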
	podsForNodes, err := getPodsForNodes(ctx.ListerRegistry)
	if err != nil {
		return map[string]*schedulerframework.NodeInfo{}, err
	}

	// processNode reports whether a node template was built, the id of the node group it was built for, and any error encountered.
	processNode := func(node *apiv1.Node) (bool, string, errors.AutoscalerError) {
		nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(node)
		if err != nil {
			return false, "", errors.ToAutoscalerError(errors.CloudProviderError, err)
		}
		if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
			return false, "", nil
		}
		id := nodeGroup.Id()
		if _, found := result[id]; !found {
			// Build nodeInfo.
			nodeInfo, err := simulator.BuildNodeInfoForNode(node, podsForNodes)
			if err != nil {
				return false, "", err
			}
			sanitizedNodeInfo, err := utils.SanitizeNodeInfo(nodeInfo, id, ignoredTaints)
			if err != nil {
				return false, "", err
			}
			result[id] = sanitizedNodeInfo
			return true, id, nil
		}
		return false, "", nil
	}

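	// First pass: build templates from healthy, schedulable nodes.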
	for _, node := range nodes {
		// Broken nodes may be missing data required for a template; skip them here.
		if !isNodeGoodTemplateCandidate(node, now) {
			continue
		}
		added, id, typedErr := processNode(node)
		if typedErr != nil {
			return map[string]*schedulerframework.NodeInfo{}, typedErr
		}
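		// Cache a deep copy of the freshly built template so it can be reused when the group later has no live nodes.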
		if added && p.nodeInfoCache != nil {
			if nodeInfoCopy, err := utils.DeepCopyNodeInfo(result[id]); err == nil {
				p.nodeInfoCache[id] = nodeInfoCopy
			}
		}
	}
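	// Second pass: node groups that still have no template fall back to a cached copy or to a provider-generated template.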
	for _, nodeGroup := range ctx.CloudProvider.NodeGroups() {
		id := nodeGroup.Id()
		seenGroups[id] = true
		if _, found := result[id]; found {
			continue
		}

		// No good template, check cache of previously running nodes.
		if p.nodeInfoCache != nil {
			if nodeInfo, found := p.nodeInfoCache[id]; found {
				if nodeInfoCopy, err := utils.DeepCopyNodeInfo(nodeInfo); err == nil {
					result[id] = nodeInfoCopy
					continue
				}
			}
		}

		// Neither a live node nor the cache produced a template, so ask the cloud provider
		// to generate one. This path is taken only when the node group has no working nodes,
		// since by default CA prefers to base the template on a real-world example.
		nodeInfo, err := utils.GetNodeInfoFromTemplate(nodeGroup, daemonsets, ctx.PredicateChecker, ignoredTaints)
		if err != nil {
			if err == cloudprovider.ErrNotImplemented {
				continue
			}
			klog.Errorf("Unable to build proper template node for %s: %v", id, err)
			return map[string]*schedulerframework.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err)
		}
		result[id] = nodeInfo
	}

	// Drop cached templates for node groups that no longer exist.
	for id := range p.nodeInfoCache {
		if _, ok := seenGroups[id]; !ok {
			delete(p.nodeInfoCache, id)
		}
	}

	// Last resort - unready/unschedulable nodes.
	for _, node := range nodes {
		// This pass allows the broken nodes that were skipped in the first pass.
		if isNodeGoodTemplateCandidate(node, now) {
			continue
		}
		added, _, typedErr := processNode(node)
		if typedErr != nil {
			return map[string]*schedulerframework.NodeInfo{}, typedErr
		}
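		// Resolve the node group again only so its id can be included in the warning below.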
		nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(node)
		if err != nil {
			return map[string]*schedulerframework.NodeInfo{}, errors.ToAutoscalerError(
				errors.CloudProviderError, err)
		}
		if added {
			klog.Warningf("Built template for %s based on unready/unschedulable node %s", nodeGroup.Id(), node.Name)
		}
	}

	return result, nil
}
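
To make the control flow above easier to follow in isolation, here is a minimal, self-contained sketch of the same selection priority: a template from a ready node first, then a cached copy, then a provider-generated template, then an unready/unschedulable node as a last resort. The types and helpers in it (nodeTemplate, group, pickTemplates) are hypothetical simplifications for illustration only and are not part of the cluster-autoscaler codebase; the real implementation works on schedulerframework.NodeInfo, cloudprovider.NodeGroup and the nodeInfoCache shown above.

package main

import "fmt"

// nodeTemplate stands in for schedulerframework.NodeInfo in this sketch.
type nodeTemplate struct {
	source string // where the template came from, for illustration only
}

// group is a hypothetical, simplified view of a node group.
type group struct {
	id            string
	readyNode     bool // a healthy node exists to copy from
	unreadyNode   bool // only an unready/unschedulable node exists
	providerKnows bool // the cloud provider can generate a template
}

// pickTemplates mirrors the priority used by Process: live ready node,
// then cache, then provider template, then unready node as a last resort.
func pickTemplates(groups []group, cache map[string]nodeTemplate) map[string]nodeTemplate {
	result := make(map[string]nodeTemplate)

	// Pass 1: templates from ready nodes (and refresh the cache).
	for _, g := range groups {
		if g.readyNode {
			t := nodeTemplate{source: "ready node"}
			result[g.id] = t
			cache[g.id] = t
		}
	}

	// Pass 2: fill the gaps from the cache or from the provider.
	for _, g := range groups {
		if _, ok := result[g.id]; ok {
			continue
		}
		if t, ok := cache[g.id]; ok {
			result[g.id] = t
			continue
		}
		if g.providerKnows {
			result[g.id] = nodeTemplate{source: "provider template"}
		}
	}

	// Pass 3: last resort, use unready/unschedulable nodes.
	for _, g := range groups {
		if _, ok := result[g.id]; ok {
			continue
		}
		if g.unreadyNode {
			result[g.id] = nodeTemplate{source: "unready node"}
		}
	}
	return result
}

func main() {
	groups := []group{
		{id: "ng-a", readyNode: true},
		{id: "ng-b", providerKnows: true},
		{id: "ng-c", unreadyNode: true},
	}
	for id, t := range pickTemplates(groups, map[string]nodeTemplate{}) {
		fmt.Printf("%s <- %s\n", id, t.source)
	}
}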