// Excerpt from cmd/e2e-test/node/create.go [64:210].

// Run provisions a single peered hybrid node end-to-end: it reads the e2e
// config, sets up the shared test infrastructure, launches an EC2 instance
// for the selected OS/arch and credentials provider, streams the instance's
// serial console to stdout, waits for the node to become Ready in the
// cluster, and finally creates the VPC routes for it.
//
// NOTE(review): the log and opts parameters are currently unused — the
// function builds its own logger via e2e.NewLogger(). Confirm whether they
// should be wired through or dropped from the CLI interface.
func (c *create) Run(log *zap.Logger, opts *cli.GlobalOptions) error {
	ctx := context.Background()
	config, err := e2e.ReadConfig(c.configFile)
	if err != nil {
		return err
	}

	logger := e2e.NewLogger()
	aws, err := e2e.NewAWSConfig(ctx, awsconfig.WithRegion(config.ClusterRegion))
	if err != nil {
		return fmt.Errorf("reading AWS configuration: %w", err)
	}

	infra, err := peered.Setup(ctx, logger, aws, config.ClusterName, config.Endpoint)
	if err != nil {
		return fmt.Errorf("setting up test infrastructure: %w", err)
	}

	eksClient := e2e.NewEKSClient(aws, config.Endpoint)
	ec2Client := ec2.NewFromConfig(aws)
	ssmClient := ssmsdk.NewFromConfig(aws)
	s3Client := s3sdk.NewFromConfig(aws)

	// Build both typed and dynamic Kubernetes clients from the kubeconfig
	// written for this cluster; both are needed by peered.Network below.
	clientConfig, err := clientcmd.BuildConfigFromFlags("", cluster.KubeconfigPath(config.ClusterName))
	if err != nil {
		return err
	}
	k8s, err := clientgo.NewForConfig(clientConfig)
	if err != nil {
		return err
	}

	k8sDynamic, err := dynamic.NewForConfig(clientConfig)
	if err != nil {
		return err
	}

	cluster, err := peered.GetHybridCluster(ctx, eksClient, ec2Client, config.ClusterName)
	if err != nil {
		return err
	}

	urls, err := s3.BuildNodeamURLs(ctx, s3Client, config.NodeadmUrlAMD, config.NodeadmUrlARM)
	if err != nil {
		return err
	}

	node := peered.NodeCreate{
		AWS:     aws,
		Cluster: cluster,
		EC2:     ec2Client,
		SSM:     ssmClient,
		Logger:  logger,

		NodeadmURLs: *urls,
		PublicKey:   infra.NodesPublicSSHKey,
	}

	nodeOS, err := buildOS(c.os, c.arch)
	if err != nil {
		return err
	}

	var credsProvider e2e.NodeadmCredentialsProvider

	switch c.credsProvider {
	case "iam-ra":
		credsProvider = &credentials.IamRolesAnywhereProvider{
			RoleARN:        infra.Credentials.IRANodeRoleARN,
			ProfileARN:     infra.Credentials.IRAProfileARN,
			TrustAnchorARN: infra.Credentials.IRATrustAnchorARN,
			CA:             infra.Credentials.RolesAnywhereCA,
		}
	case "ssm":
		credsProvider = &credentials.SsmProvider{
			SSM:  ssmClient,
			Role: infra.Credentials.SSMNodeRoleName,
		}
	default:
		// Fail fast: without this, a typo'd provider left credsProvider nil
		// and the error surfaced much later, far from the real cause.
		return fmt.Errorf("unsupported credentials provider %q (expected %q or %q)", c.credsProvider, "iam-ra", "ssm")
	}

	instanceSize := e2e.Large
	if c.instanceSize == "XLarge" {
		instanceSize = e2e.XLarge
	}

	peerdNode, err := node.Create(ctx, &peered.NodeSpec{
		InstanceName:   c.instanceName,
		InstanceSize:   instanceSize,
		NodeK8sVersion: cluster.KubernetesVersion,
		NodeName:       c.instanceName,
		OS:             nodeOS,
		Provider:       credsProvider,
	})
	if err != nil {
		return err
	}

	logger.Info("Node created", "instanceID", peerdNode.Instance.ID)

	logger.Info("Connecting to the node serial console...")
	serial, err := node.SerialConsole(ctx, peerdNode.Instance.ID)
	if err != nil {
		return fmt.Errorf("preparing EC2 for serial connection: %w", err)
	}
	defer serial.Close()

	// Start copying serial output into a pausable writer, but keep it paused
	// until the structured "waiting" log line below has been printed so the
	// console stream doesn't interleave with setup logs.
	pausableOutput := e2e.NewSwitchWriter(os.Stdout)
	pausableOutput.Pause()
	if err := serial.Copy(pausableOutput); err != nil {
		return fmt.Errorf("connecting to EC2 serial console: %w", err)
	}

	logger.Info("Waiting for the node to initialize...")
	if err := pausableOutput.Resume(); err != nil {
		return fmt.Errorf("resuming output: %w", err)
	}

	verifyNode := kubernetes.VerifyNode{
		K8s:      k8s,
		Logger:   logr.Discard(),
		NodeName: peerdNode.Name,
		NodeIP:   peerdNode.Instance.IP,
	}
	vn, err := verifyNode.WaitForNodeReady(ctx)
	if err != nil {
		return fmt.Errorf("waiting for node to be ready: %w", err)
	}
	// Pause the serial stream again before logging so the "ready" message
	// lands on a clean line.
	pausableOutput.Pause()
	fmt.Println() // newline after pausing the serial output to ensure a clean log after
	logger.Info("Node is ready", "nodeName", vn.Name)

	network := peered.Network{
		EC2:    ec2Client,
		Logger: logger,
		K8s: peered.K8s{
			Interface: k8s,
			Dynamic:   k8sDynamic,
		},
		Cluster: cluster,
	}

	if err := network.CreateRoutesForNode(ctx, &peerdNode); err != nil {
		return fmt.Errorf("creating routes for node: %w", err)
	}

	return nil
}