def configure()

in cli/src/pcluster/cli/commands/configure/easyconfig.py [0:0]


def configure(args):  # noqa: C901
    config_file_path = args.config
    # Refuse to overwrite: error out if a file or directory already exists at the given path
    if os.path.exists(config_file_path):
        error(f"A file/folder exists at {config_file_path}. Please specify a different file path")

    print(f"INFO: Configuration file {config_file_path} will be written.")
    print("Press CTRL-C to interrupt the procedure.\n\n")

    if not args.region:
        # Use the built-in boto3 region list as the available options
        available_regions = get_regions()
        aws_region_name = prompt_iterable(
            "AWS Region ID", available_regions, default_value=boto3.session.Session().region_name
        )
        # Set the provided region in the environment so suggestions and validations use it from here on
        os.environ["AWS_DEFAULT_REGION"] = aws_region_name
    else:
        os.environ["AWS_DEFAULT_REGION"] = args.region

    # List the EC2 key pairs available in the current region, if any
    available_keys = _get_keys()
    key_name = prompt_iterable("EC2 Key Pair Name", available_keys)

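    # The scheduler choice drives the rest of the wizard: awsbatch uses fixed
    # defaults below, while other schedulers prompt for OS, queues and sizes.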
    scheduler = prompt_iterable("Scheduler", SUPPORTED_SCHEDULERS)

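    # awsbatch clusters are pinned to Amazon Linux 2; other schedulers prompt
    # for one of the operating systems they support.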
    if scheduler == "awsbatch":
        base_os = "alinux2"
    else:
        base_os = prompt_iterable("Operating System", get_supported_os_for_scheduler(scheduler))

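    # Suggest the region's default instance type and accept any type offered in the region.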
    default_instance_type = AWSApi.instance().ec2.get_default_instance_type()
    head_node_instance_type = prompt(
        "Head node instance type",
        lambda x: x in AWSApi.instance().ec2.list_instance_types(),
        default_value=default_instance_type,
    )
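    # awsbatch manages a single queue sized in vCPUs; other schedulers support
    # multiple queues sized by instance count.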
    if scheduler == "awsbatch":
        number_of_queues = 1
        size_name = "vCPU"
    else:
        number_of_queues = int(
            prompt(
                "Number of queues",
                lambda x: str(x).isdigit() and 1 <= int(x) <= MAX_NUMBER_OF_QUEUES,
                default_value=1,
            )
        )
        size_name = "instance count"

    queues = []
    queue_names = []
    compute_instance_types = []
    cluster_size = 0  # Sum of the maximum counts across all compute resources
    for queue_index in range(number_of_queues):
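        # Re-prompt until the queue name is valid and not already used by another queue.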
        while True:
            queue_name = prompt(
                f"Name of queue {queue_index+1}",
                validator=lambda x: len(NameValidator().execute(x)) == 0,
                default_value=f"queue{queue_index+1}",
            )
            if queue_name not in queue_names:
                break
            print(
                f"Error: The name {queue_name} cannot be used for multiple queues. Please insert a different queue "
                "name."
            )

        if scheduler == "awsbatch":
            number_of_compute_resources = 1
        else:
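            # Split the cluster-wide compute resource limit evenly across queues.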
            crs_per_queue_limit = MAX_NUMBER_OF_COMPUTE_RESOURCES_PER_CLUSTER // number_of_queues

            number_of_compute_resources = int(
                prompt(
                    f"Number of compute resources for {queue_name}",
                    validator=lambda x, q=crs_per_queue_limit: str(x).isdigit() and 1 <= int(x) <= q,
                    default_value=1,
                )
            )
        compute_resources = []
        efa_enabled_in_queue = False
        for compute_resource_index in range(number_of_compute_resources):
            efa_enabled_in_compute_resource = False
            efa_supported_by_instance_type = False
            if scheduler != "awsbatch":
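                # Re-prompt until the instance type exists in the region and is not
                # already used by another compute resource in this queue.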
                while True:
                    compute_instance_type = prompt(
                        f"Compute instance type for compute resource {compute_resource_index+1} in {queue_name}",
                        validator=lambda x: x in AWSApi.instance().ec2.list_instance_types(),
                        default_value=default_instance_type,
                    )
                    if compute_instance_type not in [
                        instances["InstanceType"]
                        for compute_resource in compute_resources
                        for instances in compute_resource["Instances"]
                    ]:
                        break
                    print(
                        f"Error: Instance type {compute_instance_type} cannot be specified for multiple compute "
                        "resources in the same queue. Please insert a different instance type."
                    )
                compute_resource_name = re.sub(r"[^A-Za-z0-9]", "", compute_instance_type)

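                # Offer EFA only when the selected instance type supports it.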
                efa_supported_by_instance_type = instance_type_supports_efa(compute_instance_type)
                if efa_supported_by_instance_type:
                    efa_enabled_in_compute_resource = _prompt_for_efa(compute_instance_type)
                    if efa_enabled_in_compute_resource:
                        efa_enabled_in_queue = True
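            # The minimum count stays at the default; only the maximum is prompted for.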
            min_cluster_size = DEFAULT_MIN_COUNT
            max_cluster_size = int(
                prompt(
                    "Maximum {0}".format(size_name),
                    validator=lambda x, ms=min_cluster_size: str(x).isdigit() and int(x) >= ms,  # pylint: disable=W0640
                    default_value=DEFAULT_MAX_COUNT,
                )
            )
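            # awsbatch always uses a single "optimal" compute environment sized in
            # vCPUs; other schedulers map each compute resource to one instance type.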
            if scheduler == "awsbatch":
                compute_resources.append(
                    {
                        "Name": "optimal",
                        "InstanceTypes": ["optimal"],
                        "MinvCpus": min_cluster_size,
                        "DesiredvCpus": min_cluster_size,
                        "MaxvCpus": max_cluster_size,
                    }
                )
            else:
                compute_resource = {
                    "Name": compute_resource_name,
                    "Instances": [{"InstanceType": compute_instance_type}],
                    "MinCount": min_cluster_size,
                    "MaxCount": max_cluster_size,
                }
                if efa_supported_by_instance_type:
                    compute_resource["Efa"] = {"Enabled": efa_enabled_in_compute_resource}

                compute_resources.append(compute_resource)
                compute_instance_types.append(compute_instance_type)

            queue_names.append(queue_name)
            cluster_size += max_cluster_size  # Fixme: is it the right calculation for awsbatch?

        queue = {
            "Name": queue_name,
            "ComputeResources": compute_resources,
        }
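        # When EFA is enabled anywhere in the queue, enable a placement group on its
        # networking section, optionally pointing at an existing group by name.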
        if efa_enabled_in_queue:
            placement_group = {"Enabled": True}
            placement_group_name = _prompt_for_placement_group()
            if placement_group_name:
                placement_group["Id"] = placement_group_name

            networking = queue.get("Networking", {})
            networking["PlacementGroup"] = placement_group
            queue["Networking"] = networking

        queues.append(queue)

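    # Prompt for the VPC and subnets (or their automated creation) used by the head node and compute fleet.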
    vpc_parameters = _create_vpc_parameters(scheduler, head_node_instance_type, compute_instance_types, cluster_size)

    # End of the interactive prompts. The code below assembles the configuration and writes it to file.
    for queue in queues:
        networking = queue.get("Networking", {})
        networking["SubnetIds"] = [vpc_parameters["compute_subnet_id"]]
        queue["Networking"] = networking

    head_node_config = {
        "InstanceType": head_node_instance_type,
        "Networking": {"SubnetId": vpc_parameters["head_node_subnet_id"]},
        "Ssh": {"KeyName": key_name},
    }
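    # For awsbatch the head node IMDS is left unsecured and the queues key becomes
    # "AwsBatchQueues"; for other schedulers it is "<Scheduler>Queues" (e.g. "SlurmQueues").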
    if scheduler == "awsbatch":
        scheduler_prefix = "AwsBatch"
        head_node_config["Imds"] = {"Secured": False}
    else:
        scheduler_prefix = scheduler.capitalize()

    result = {
        "Region": os.environ.get("AWS_DEFAULT_REGION"),
        "Image": {"Os": base_os},
        "HeadNode": head_node_config,
        "Scheduling": {"Scheduler": scheduler, f"{scheduler_prefix}Queues": queues},
    }

    _write_configuration_file(config_file_path, result)
    print(
        "You can edit your configuration file or simply run 'pcluster create-cluster --cluster-configuration "
        f"{config_file_path} --cluster-name cluster-name --region {get_region()}' to create your cluster."
    )
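
A minimal sketch of how this entry point could be driven outside the full pcluster CLI, assuming only the two attributes the function reads above (config and region); the SimpleNamespace driver and the file name are hypothetical and not part of the module:

from types import SimpleNamespace

args = SimpleNamespace(config="cluster-config.yaml", region=None)  # hypothetical stand-in for parsed CLI args
configure(args)  # walks through the interactive prompts and writes cluster-config.yaml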