// Excerpt: nodeInit, from pkg/ipamd/ipamd.go [437:590]

// nodeInit performs one-time initialization for this node: it sets up host
// networking, discovers and configures every attached (managed) ENI, restores
// the IP datastore from its backing store, reconciles prefix-delegation
// leftovers, applies node labels consumed by the VPC Resource Controller, and
// attaches an initial set of IPs/prefixes for a fresh node. It returns an
// error when initialization cannot proceed; callers are expected to treat a
// failure as fatal for the ipamd process.
func (c *IPAMContext) nodeInit() error {
	ipamdActionsInprogress.WithLabelValues("nodeInit").Add(float64(1))
	defer ipamdActionsInprogress.WithLabelValues("nodeInit").Sub(float64(1))
	var err error
	var vpcV4CIDRs []string
	ctx := context.TODO()

	log.Debugf("Start node init")

	primaryV4IP := c.awsClient.GetLocalIPv4()
	// BUG FIX: this error was previously assigned but never checked, so a
	// failed ENI/IP limit lookup for the instance type went unnoticed and was
	// silently overwritten by the next assignment to err.
	if err = c.initENIAndIPLimits(); err != nil {
		return err
	}
	if c.enableIPv4 {
		//Subnets currently will have both v4 and v6 CIDRs. Once EC2 launches v6 only Subnets, that will no longer
		//be true and so it is safe (and only required) to get the v4 CIDR info only when IPv4 mode is enabled.
		vpcV4CIDRs, err = c.awsClient.GetVPCIPv4CIDRs()
		if err != nil {
			return err
		}
	}

	err = c.networkClient.SetupHostNetwork(vpcV4CIDRs, c.awsClient.GetPrimaryENImac(), &primaryV4IP, c.enablePodENI, c.enableIPv4,
		c.enableIPv6)
	if err != nil {
		return errors.Wrap(err, "ipamd init: failed to set up host network")
	}

	metadataResult, err := c.awsClient.DescribeAllENIs()
	if err != nil {
		// Wrap (rather than errors.New) so the underlying EC2/metadata error
		// is preserved in the chain for debugging.
		return errors.Wrap(err, "ipamd init: failed to retrieve attached ENIs info")
	}
	log.Debugf("DescribeAllENIs success: ENIs: %d, tagged: %d", len(metadataResult.ENIMetadata), len(metadataResult.TagMap))
	c.awsClient.SetCNIUnmanagedENIs(metadataResult.MultiCardENIIDs)
	c.setUnmanagedENIs(metadataResult.TagMap)
	// Only ENIs that ipamd manages are set up below; trunk/multi-card/tagged
	// unmanaged ENIs are filtered out.
	enis := c.filterUnmanagedENIs(metadataResult.ENIMetadata)

	for _, eni := range enis {
		log.Debugf("Discovered ENI %s, trying to set it up", eni.ENIID)

		isTrunkENI := eni.ENIID == metadataResult.TrunkENI
		isEFAENI := metadataResult.EFAENIs[eni.ENIID]
		if !isTrunkENI && !c.disableENIProvisioning {
			if err := c.awsClient.TagENI(eni.ENIID, metadataResult.TagMap[eni.ENIID]); err != nil {
				return errors.Wrapf(err, "ipamd init: failed to tag managed ENI %v", eni.ENIID)
			}
		}

		// Retry ENI sync: the instance metadata service can lag behind the
		// actual attachment, so poll up to maxRetryCheckENI times.
		retry := 0
		for {
			retry++
			if err = c.setupENI(eni.ENIID, eni, isTrunkENI, isEFAENI); err == nil {
				log.Infof("ENI %s set up.", eni.ENIID)
				break
			}

			if retry > maxRetryCheckENI {
				log.Warnf("Reached max retry: Unable to discover attached IPs for ENI from metadata service (attempted %d/%d): %v", retry, maxRetryCheckENI, err)
				ipamdErrInc("waitENIAttachedMaxRetryExceeded")
				break
			}

			log.Warnf("Error trying to set up ENI %s: %v", eni.ENIID, err)
			if strings.Contains(err.Error(), "setupENINetwork: failed to find the link which uses MAC address") {
				// If we can't find the matching link for this MAC address, there is no point in retrying for this ENI.
				log.Debug("Unable to match link for this ENI, going to the next one.")
				break
			}
			log.Debugf("Unable to discover IPs for this ENI yet (attempt %d/%d)", retry, maxRetryCheckENI)
			time.Sleep(eniAttachTime)
		}
	}

	// Restore previously-assigned pod IPs so existing pods keep working
	// across an ipamd restart.
	if err := c.dataStore.ReadBackingStore(c.enableIPv6); err != nil {
		return err
	}

	if c.enableIPv6 {
		//We will not support upgrading/converting an existing IPv4 cluster to operate in IPv6 mode. So, we will always
		//start with a clean slate in IPv6 mode. We also don't have to deal with dynamic update of Prefix Delegation
		//feature in IPv6 mode as we don't support (yet) a non-PD v6 option. In addition, we don't support custom
		//networking & SGPP in IPv6 mode yet. So, we will skip the corresponding setup. Will save us from checking
		//if IPv6 is enabled at multiple places. Once we start supporting these features in IPv6 mode, we can do away
		//with this check and not change anything else in the below setup.
		return nil
	}

	if c.enablePrefixDelegation {
		//During upgrade or if prefix delgation knob is disabled to enabled then we
		//might have secondary IPs attached to ENIs so doing a cleanup if not used before moving on
		c.tryUnassignIPsFromENIs()
	} else {
		//When prefix delegation knob is enabled to disabled then we might
		//have unused prefixes attached to the ENIs so need to cleanup
		c.tryUnassignPrefixesFromENIs()
	}

	if err = c.configureIPRulesForPods(); err != nil {
		return err
	}
	// Spawning updateCIDRsRulesOnChange go-routine to keep IP rules in sync
	// with VPC CIDR changes for the lifetime of the process.
	go wait.Forever(func() {
		vpcV4CIDRs = c.updateCIDRsRulesOnChange(vpcV4CIDRs)
	}, 30*time.Second)

	eniConfigName, err := eniconfig.GetNodeSpecificENIConfigName(ctx, c.cachedK8SClient)
	if err == nil && c.useCustomNetworking && eniConfigName != "default" {
		// Signal to VPC Resource Controller that the node is using custom networking
		err := c.SetNodeLabel(ctx, vpcENIConfigLabel, eniConfigName)
		if err != nil {
			// BUG FIX: the format string was missing a verb for err, so the
			// error was dropped from the log line (go vet: printf check).
			log.Errorf("Failed to set eniConfig node label: %v", err)
			podENIErrInc("nodeInit")
			return err
		}
	} else {
		// Remove the custom networking label
		err := c.SetNodeLabel(ctx, vpcENIConfigLabel, "")
		if err != nil {
			log.Errorf("Failed to delete eniConfig node label: %v", err)
			podENIErrInc("nodeInit")
			return err
		}
	}

	if metadataResult.TrunkENI != "" {
		// Signal to VPC Resource Controller that the node has a trunk already
		err := c.SetNodeLabel(ctx, "vpc.amazonaws.com/has-trunk-attached", "true")
		if err != nil {
			log.Errorf("Failed to set node label: %v", err)
			podENIErrInc("nodeInit")
			// If this fails, we probably can't talk to the API server. Let the pod restart
			return err
		}
	} else {
		// Check if we want to ask for one
		c.askForTrunkENIIfNeeded(ctx)
	}

	if !c.disableENIProvisioning {
		// For a new node, attach Cidrs (secondary ips/prefixes)
		increasedPool, err := c.tryAssignCidrs()
		if err == nil && increasedPool {
			c.updateLastNodeIPPoolAction()
		} else if err != nil {
			if containsInsufficientCIDRsOrSubnetIPs(err) {
				// Subnet exhaustion is not fatal for init: record it and let
				// the reconciliation loop retry after backoff.
				log.Errorf("Unable to attach IPs/Prefixes for the ENI, subnet doesn't seem to have enough IPs/Prefixes. Consider using new subnet or carve a reserved range using create-subnet-cidr-reservation")
				c.lastInsufficientCidrError = time.Now()
				return nil
			}
			return err
		}
	}

	return nil
}