def _add_resources()

in cli/src/pcluster/templates/cluster_stack.py [0:0]


    def _add_resources(self):
        """Create every CloudFormation resource of the cluster stack, in dependency order.

        Order matters here: security groups must exist before the head-node ENI,
        shared storage must exist before the fleet/head node mount it, and the
        head node must exist before the Batch construct and CloudWatch dashboard
        that reference it. Results are stored on ``self`` for use by later steps.
        """
        # Cloud Watch Logs: log group is created only when CW logging is enabled;
        # self.log_group stays None otherwise (checked again by the dashboard below).
        self.log_group = None
        if self.config.is_cw_logging_enabled:
            self.log_group = self._add_cluster_log_group()

        # Managed security groups (head / compute / login). Any of these may be
        # None when user-supplied security groups are configured — TODO confirm
        # against _add_security_groups.
        (
            self._head_security_group,
            self._compute_security_group,
            self._login_security_group,
        ) = self._add_security_groups()
        # Head Node ENI — depends on the head security group created above.
        self._head_eni = self._add_head_eni()

        # Add the internal use shared storage to the stack
        # This FS will be mounted, the shared dirs will be added,
        # then it will be unmounted and the shared dirs will be
        # mounted.  We need to create the additional mount points first.
        if self.config.head_node.shared_storage_type.lower() == SharedStorageType.EFS.value:
            internal_efs_storage_shared = SharedEfs(
                mount_dir="/opt/parallelcluster/init_shared", name="internal_pcluster_shared", throughput_mode="elastic"
            )
            self._add_shared_storage(internal_efs_storage_shared)

        # Add user configured shared storage (created after the internal FS so
        # the init_shared mount point exists first).
        if self.config.shared_storage:
            for storage in self.config.shared_storage:
                self._add_shared_storage(storage)

        # IAM roles/policies/instance profiles for the cluster nodes.
        self._add_iam_resources()

        # Additional Cfn Stack: nested stack from a user-provided template URL.
        # The construct registers itself with the scope; no reference is kept.
        if self.config.additional_resources:
            CfnStack(self.stack, "AdditionalCfnStack", template_url=self.config.additional_resources)

        # Cleanup Resources Lambda Function — passed to the fleet/scheduler
        # resources so they can attach cleanup custom resources to it.
        cleanup_lambda_role, cleanup_lambda = self._add_cleanup_resources_lambda()

        # Scheduler/fleet resources; presumably sets self.scheduler_resources
        # for the Slurm case used below — TODO confirm in the callee.
        self._add_fleet_and_scheduler_resources(cleanup_lambda, cleanup_lambda_role)

        # Wait condition used to signal head-node bootstrap completion.
        self.wait_condition, self.wait_condition_handle = self._add_wait_condition()

        # Head Node
        self.head_node_instance = self._add_head_node()
        # Add a dependency to the cleanup Route53 resource, so that Route53 Hosted Zone is cleaned after node is deleted
        if self._condition_is_slurm() and hasattr(self.scheduler_resources, "cleanup_route53_custom_resource"):
            self.head_node_instance.add_depends_on(self.scheduler_resources.cleanup_route53_custom_resource)

        # Initialize Login Nodes
        self._add_login_nodes_resources()

        # AWS Batch related resources. For Batch clusters this overwrites
        # self.scheduler_resources with the Batch construct; it must run after
        # the head node exists because it references head_node_instance.
        if self._condition_is_batch():
            self.scheduler_resources = AwsBatchConstruct(
                scope=self.stack,
                id="AwsBatch",
                stack_name=self._stack_name,
                cluster_config=self.config,
                bucket=self.bucket,
                create_lambda_roles=self._condition_create_lambda_iam_role(),
                compute_security_group=self._compute_security_group,
                shared_storage_infos=self.shared_storage_infos,
                shared_storage_mount_dirs=self.shared_storage_mount_dirs,
                head_node_instance=self.head_node_instance,
                managed_head_node_instance_role=self._managed_head_node_instance_role,  # None if provided by the user
            )

        # Alarms: ensure self.head_node_alarms is always defined, since the
        # dashboard below reads it unconditionally.
        if self.config.are_alarms_enabled:
            self._add_head_node_alarms()
        else:
            self.head_node_alarms = []

        # CloudWatch Dashboard — last, because it aggregates references to the
        # head node, shared storage, log group and alarms created above.
        if self.config.is_cw_dashboard_enabled:
            self.cloudwatch_dashboard = CWDashboardConstruct(
                scope=self.stack,
                id="PclusterDashboard",
                stack_name=self.stack.stack_name,
                cluster_config=self.config,
                head_node_instance=self.head_node_instance,
                shared_storage_infos=self.shared_storage_infos,
                # log_group is None when CW logging is disabled, hence the guard.
                cw_log_group_name=self.log_group.log_group_name if self.config.is_cw_logging_enabled else None,
                cw_log_group=self.log_group,
                head_node_alarms=self.head_node_alarms,
            )