def deploy_env()

in cli/aws_orbit/remote_files/eksctl.py


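# Note: cfn, sh, eks, iam, autoscaling, ContextSerDe and helpers such as generate_manifest,
# map_iam_identities, deploy_eniconfig, and fetch_cluster_data referenced below are assumed to be
# imported or defined elsewhere in eksctl.py; this excerpt shows only deploy_env itself.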
def deploy_env(context: "Context", changeset: Optional[Changeset]) -> None:
    stack_name: str = f"orbit-{context.name}"
    final_eks_stack_name: str = f"eksctl-{stack_name}-cluster"
    _logger.debug("EKSCTL stack name: %s", final_eks_stack_name)
    _logger.debug("Synthetizing the EKSCTL Environment manifest")
    cluster_name = f"orbit-{context.name}"

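    # Create path: the eksctl-managed CloudFormation stack does not exist yet, so build the cluster from scratch.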
    if not cfn.does_stack_exist(stack_name=final_eks_stack_name):

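        # Nodegroups requested through the changeset are created together with the cluster (empty list when no changeset is given).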
        requested_nodegroups = (
            changeset.managed_nodegroups_changeset.added_nodegroups
            if changeset and changeset.managed_nodegroups_changeset
            else []
        )
        _logger.debug(f"requested nodegroups: {[n.name for n in requested_nodegroups]}")

        output_filename = generate_manifest(context=context, name=stack_name, nodegroups=requested_nodegroups)

        _logger.debug("Deploying EKSCTL Environment resources")
        sh.run(
            f"eksctl create cluster -f {output_filename} --install-nvidia-plugin=false "
            "--write-kubeconfig --verbose 4"
        )

        username = context.toolkit.admin_role
        arn = f"arn:aws:iam::{context.account_id}:role/{username}"
        _logger.debug(f"Adding IAM Identity Mapping - Role: {arn}, Username: {username}, Group: system:masters")
        sh.run(
            f"eksctl create iamidentitymapping --cluster {cluster_name} --arn {arn} "
            f"--username {username} --group system:masters"
        )

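        # When a secondary VPC CIDR is configured, resolve the AZ of each private subnet and deploy the matching ENIConfig resources.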
        if context.networking.secondary_cidr:
            set_secondary_vpc_cidr_env_vars()
            subnet_ids = [subnet.subnet_id for subnet in context.networking.private_subnets]
            _logger.debug(f"Getting AZ'z for the subnets: {subnet_ids}")
            az_subnet_map = get_az_from_subnet(subnets=subnet_ids)
            deploy_eniconfig(az_subnet_map=az_subnet_map, context=context)

        context.managed_nodegroups = requested_nodegroups

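        # Nodegroups requested with fewer than one desired/minimum node are scaled down after creation.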
        for ng in requested_nodegroups:
            if ng.nodes_num_desired < 1 or ng.nodes_num_min < 1:
                _logger.debug(f"Reducing AutoScaling capacity for newly create NodeGroup: {ng.name}")
                autoscaling.update_nodegroup_autoscaling_group(
                    cluster_name=f"orbit-{context.name}", nodegroup_manifest=ng
                )

        ContextSerDe.dump_context_to_ssm(context=context)
    else:

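        # Update path: the cluster stack already exists; refresh the kubeconfig and apply nodegroup changes from the changeset.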
        current_nodegroups = context.managed_nodegroups
        _logger.debug(f"current nodegroups: {[n.name for n in current_nodegroups]}")

        sh.run(f"eksctl utils write-kubeconfig --cluster orbit-{context.name} --set-kubeconfig-context")
        if changeset and changeset.managed_nodegroups_changeset:
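            # Create added nodegroups that do not exist yet, delete removed ones that still do, and resize modified ones.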
            if changeset.managed_nodegroups_changeset.added_nodegroups:
                output_filename = generate_manifest(
                    context=context, name=stack_name, nodegroups=changeset.managed_nodegroups_changeset.added_nodegroups
                )
                nodegroups = [
                    ng.name
                    for ng in changeset.managed_nodegroups_changeset.added_nodegroups
                    if not eks.describe_nodegroup(cluster_name=cluster_name, nodegroup_name=ng.name)
                ]
                _logger.debug("Creating ManagedNodeGroups: %s", nodegroups)
                sh.run(f"eksctl create nodegroup -f {output_filename} --include={','.join(nodegroups)} --verbose 4")
                current_nodegroups.extend(
                    [ng for ng in changeset.managed_nodegroups_changeset.added_nodegroups if ng.name in nodegroups]
                )
                context.managed_nodegroups = current_nodegroups
                ContextSerDe.dump_context_to_ssm(context=context)

            if changeset.managed_nodegroups_changeset.removed_nodegroups:
                output_filename = generate_manifest(
                    context=context,
                    name=stack_name,
                    nodegroups=changeset.managed_nodegroups_changeset.removed_nodegroups,
                )
                nodegroups = [
                    ng.name
                    for ng in changeset.managed_nodegroups_changeset.removed_nodegroups
                    if eks.describe_nodegroup(cluster_name=cluster_name, nodegroup_name=ng.name)
                ]
                _logger.debug("Deleting ManagedNodeGroups: %s", nodegroups)
                sh.run(
                    f"eksctl delete nodegroup -f {output_filename} --include={','.join(nodegroups)} "
                    "--approve --wait --drain=false --verbose 4"
                )
                context.managed_nodegroups = [ng for ng in current_nodegroups if ng.name not in nodegroups]
                ContextSerDe.dump_context_to_ssm(context=context)

            if changeset.managed_nodegroups_changeset.modified_nodegroups:
                for ng in changeset.managed_nodegroups_changeset.modified_nodegroups:
                    autoscaling.update_nodegroup_autoscaling_group(
                        cluster_name=f"orbit-{context.name}", nodegroup_manifest=ng
                    )

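    # Map IAM identities for system:masters access, picking up any role changes from the changeset.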
    eks_system_masters_changeset = (
        changeset.eks_system_masters_roles_changeset
        if changeset and changeset.eks_system_masters_roles_changeset
        else None
    )
    map_iam_identities(
        context=context,
        cluster_name=cluster_name,
        eks_system_masters_roles_changes=eks_system_masters_changeset,
    )

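    # Associate the OIDC provider, fetch cluster metadata into the context, and authorize the cluster pod security group.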
    associate_open_id_connect_provider(context=context, cluster_name=cluster_name)
    fetch_cluster_data(context=context, cluster_name=cluster_name)
    authorize_cluster_pod_security_group(context=context)

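    # Add web-identity trust statements (via the cluster's OIDC provider) so the cluster-autoscaler,
    # FSx CSI controller, and Orbit admin service accounts can assume their IAM roles.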
    iam.add_assume_role_statement(
        role_name=f"orbit-{context.name}-{context.region}-cluster-autoscaler-role",
        statement={
            "Effect": "Allow",
            "Principal": {"Federated": f"arn:aws:iam::{context.account_id}:oidc-provider/{context.eks_oidc_provider}"},
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringLike": {
                    f"{context.eks_oidc_provider}:sub": "system:serviceaccount:kube-system:cluster-autoscaler"
                }
            },
        },
    )

    iam.add_assume_role_statement(
        role_name=f"orbit-{context.name}-{context.region}-eks-cluster-role",
        statement={
            "Effect": "Allow",
            "Principal": {"Federated": f"arn:aws:iam::{context.account_id}:oidc-provider/{context.eks_oidc_provider}"},
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringLike": {
                    f"{context.eks_oidc_provider}:sub": "system:serviceaccount:kube-system:fsx-csi-controller-sa"
                }
            },
        },
    )

    iam.add_assume_role_statement(
        role_name=cast(str, context.toolkit.admin_role),
        statement={
            "Effect": "Allow",
            "Principal": {"Federated": f"arn:aws:iam::{context.account_id}:oidc-provider/{context.eks_oidc_provider}"},
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringLike": {
                    f"{context.eks_oidc_provider}:sub": f"system:serviceaccount:orbit-system:orbit-{context.name}-admin"
                }
            },
        },
    )

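    # Isolated (non internet-accessible) environments get a Fargate profile covering the system namespaces.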
    if not context.networking.data.internet_accessible:
        eks.create_fargate_profile(
            profile_name=f"orbit-{context.name}-system",
            cluster_name=f"orbit-{context.name}",
            role_arn=cast(str, context.eks_fargate_profile_role_arn),
            subnets=[s.subnet_id for s in context.networking.private_subnets],
            namespaces=["cert-manager", "istio-system", "kubeflow", "kube-system", "orbit-system"],
            selector_labels={"orbit/node-type": "fargate"},
        )

    _logger.debug("EKSCTL deployed")