# generate_manifest() — excerpt from cli/aws_orbit/remote_files/eksctl.py


def generate_manifest(context: "Context", name: str, nodegroups: Optional[List[ManagedNodeGroupManifest]]) -> str:
    """Populate the module-level eksctl ``MANIFEST`` and write it to disk as YAML.

    Fills in cluster metadata, VPC/subnet layout, IAM settings, addons, the
    default "env" managed nodegroup, and any caller-supplied nodegroups, then
    dumps the manifest to ``.orbit.out/<env>/eksctl/cluster.yaml``.

    Args:
        context: Orbit deployment context providing networking data, region,
            environment name, and IAM role ARNs.
        name: EKS cluster name stamped into the manifest metadata.
        nodegroups: Optional additional managed nodegroups appended after the
            default "env" nodegroup.

    Returns:
        Path of the YAML manifest file that was written.

    Raises:
        ValueError: If required networking attributes are missing (VPC id or
            CIDR, a subnet's availability zone or CIDR), or if no non-public
            subnets are configured for the cluster nodes.
    """
    internet: bool = context.networking.data.internet_accessible

    # Fill cluster wide configs
    MANIFEST["metadata"]["name"] = name
    MANIFEST["metadata"]["region"] = context.region
    # Private endpoint access is enabled exactly when the cluster is NOT
    # internet accessible.
    MANIFEST["vpc"]["clusterEndpoints"] = {"publicAccess": True, "privateAccess": not internet}
    if context.networking.vpc_id is None:
        raise ValueError("context.networking.vpc_id is None!")
    MANIFEST["vpc"]["id"] = context.networking.vpc_id
    if context.networking.vpc_cidr_block is None:
        raise ValueError("context.networking.vpc_cidr_block is None!")
    MANIFEST["vpc"]["cidr"] = context.networking.vpc_cidr_block

    for subnet in context.networking.public_subnets:
        # BUG FIX: these raises previously used logging-style extra args
        # (ValueError("... %s!", subnet.subnet_id)), which is never
        # interpolated — the exception carried a 2-tuple instead of a
        # formatted message. Use f-strings so the subnet id appears.
        if subnet.availability_zone is None:
            raise ValueError(f"subnet.availability_zone is None for {subnet.subnet_id}!")
        if subnet.cidr_block is None:
            raise ValueError(f"subnet.cidr_block is None for {subnet.subnet_id}!")
        MANIFEST["vpc"]["subnets"]["public"][subnet.availability_zone] = {
            "id": subnet.subnet_id,
            "cidr": subnet.cidr_block,
        }

    # Only private/isolated subnets that were designated as node subnets in
    # the manifest are eligible to host cluster nodes.
    private_subnets = [
        s
        for s in context.networking.private_subnets + context.networking.isolated_subnets
        if s.subnet_id in context.networking.data.nodes_subnets
    ]
    if not private_subnets:
        raise ValueError(
            "No Non-public Subnets configured for the cluster, this may be a mismatch between "
            "InternetAccessibility and Manifest Subnets"
        )
    for subnet in private_subnets:
        # Same BUG FIX as above: format the subnet id into the message.
        if subnet.availability_zone is None:
            raise ValueError(f"subnet.availability_zone is None for {subnet.subnet_id}!")
        if subnet.cidr_block is None:
            raise ValueError(f"subnet.cidr_block is None for {subnet.subnet_id}!")
        MANIFEST["vpc"]["subnets"]["private"][subnet.availability_zone] = {
            "id": subnet.subnet_id,
            "cidr": subnet.cidr_block,
        }

    MANIFEST["iam"]["serviceRoleARN"] = context.eks_cluster_role_arn
    MANIFEST["iam"]["withOIDC"] = True
    MANIFEST["managedNodeGroups"] = []

    # Kubernetes labels for the default "env" nodegroup; mirrored below as
    # cluster-autoscaler node-template tags so the autoscaler can match them.
    labels = {
        "orbit/node-group": "env",
        "orbit/usage": "reserved",
        "orbit/node-type": "ec2",
        "node-group": "env",
        "usage": "reserved",
        "node-type": "ec2",
    }
    # BUG FIX: was `tags = tags = {...}` — a duplicated-assignment typo.
    tags = {f"k8s.io/cluster-autoscaler/node-template/label/{k}": v for k, v in labels.items()}
    tags["Env"] = f"orbit-{context.name}"

    # Pinned EKS addon versions. NOTE(review): these are hard-coded and will
    # need bumping alongside the cluster Kubernetes version.
    MANIFEST["addons"] = [
        {"name": "vpc-cni", "version": "v1.9.0", "attachPolicyARNs": ["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"]},
        {"name": "kube-proxy", "version": "v1.20.4-eksbuild.2"},
        {"name": "coredns", "version": "v1.8.3-eksbuild.1"},
    ]

    # Default "env" nodegroup that hosts the Orbit environment workloads.
    MANIFEST["managedNodeGroups"].append(
        {
            "name": "env",
            "privateNetworking": True,
            "instanceType": "m5.large",
            "minSize": 2,
            "desiredCapacity": 3,
            "maxSize": 6,
            "volumeSize": 64,
            "ssh": {"allow": False},
            "labels": labels,
            "tags": tags,
            "iam": {"instanceRoleARN": context.eks_env_nodegroup_role_arn},
        }
    )

    # Fill nodegroups configs
    if nodegroups:
        for nodegroup in nodegroups:
            MANIFEST["managedNodeGroups"].append(create_nodegroup_structure(context=context, nodegroup=nodegroup))

    MANIFEST["cloudWatch"] = {"clusterLogging": {"enableTypes": ["*"]}}

    _logger.debug("eksctl manifest:\n%s", pprint.pformat(MANIFEST))
    output_filename = f".orbit.out/{context.name}/eksctl/cluster.yaml"
    os.makedirs(os.path.dirname(output_filename), exist_ok=True)
    with open(output_filename, "w") as file:
        yaml.dump(MANIFEST, file, sort_keys=False)

    _logger.debug("output_filename: %s", output_filename)
    return output_filename