in cli/src/pcluster/templates/cdk_builder_utils.py [0:0]
def _build_policy(self) -> List[iam.PolicyStatement]:
    policy = [
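        # Read-only EC2 Describe* calls used by the head node; these actions do not support
        # resource-level permissions, so they are granted on "*".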
        iam.PolicyStatement(
            sid="Ec2",
            actions=[
                "ec2:DescribeLaunchTemplateVersions",
                "ec2:DescribeInstanceAttribute",
                "ec2:DescribeInstances",
                "ec2:DescribeInstanceStatus",
                "ec2:DescribeVolumes",
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ),
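        # Tagging and EBS volume attach/detach, scoped to instances and volumes in this account and region.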
        iam.PolicyStatement(
            sid="Ec2TagsAndVolumes",
            actions=["ec2:AttachVolume", "ec2:CreateTags", "ec2:DetachVolume"],
            effect=iam.Effect.ALLOW,
            resources=[
                self._format_arn(
                    service="ec2",
                    resource="instance/*",
                    region=Stack.of(self).region,
                    account=Stack.of(self).account,
                ),
                self._format_arn(
                    service="ec2",
                    resource="volume/*",
                    region=Stack.of(self).region,
                    account=Stack.of(self).account,
                ),
            ],
        ),
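        # Objects in the regional <region>-aws-parallelcluster bucket hosting ParallelCluster artifacts.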
        iam.PolicyStatement(
            sid="S3GetObj",
            actions=["s3:GetObject"],
            effect=iam.Effect.ALLOW,
            resources=[
                self._format_arn(
                    service="s3",
                    resource="{0}-aws-parallelcluster/*".format(Stack.of(self).region),
                    region="",
                    account="",
                )
            ],
        ),
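        # Read access to the cluster's own bucket and to the artifact directory generated for this cluster.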
        iam.PolicyStatement(
            sid="ResourcesS3Bucket",
            effect=iam.Effect.ALLOW,
            actions=["s3:GetObject", "s3:GetObjectVersion", "s3:GetBucketLocation", "s3:ListBucket"],
            resources=[
                self._format_arn(service="s3", resource=self._cluster_bucket.name, region="", account=""),
                self._format_arn(
                    service="s3",
                    resource=f"{self._cluster_bucket.name}/{self._cluster_bucket.artifact_directory}/*",
                    region="",
                    account="",
                ),
            ],
        ),
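        # Describe and signal only this cluster's own CloudFormation stack.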
        iam.PolicyStatement(
            sid="CloudFormation",
            actions=[
                "cloudformation:DescribeStacks",
                "cloudformation:DescribeStackResource",
                "cloudformation:SignalResource",
            ],
            effect=iam.Effect.ALLOW,
            resources=[core.Aws.STACK_ID],
        ),
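        # Objects in the regional NICE DCV license bucket.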
        iam.PolicyStatement(
            sid="DcvLicense",
            actions=[
                "s3:GetObject",
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                self._format_arn(
                    service="s3",
                    resource="dcv-license.{0}/*".format(Stack.of(self).region),
                    region="",
                    account="",
                )
            ],
        ),
    ]
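    # Slurm clusters configured with an external munge key need to read it from Secrets Manager.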
    if (
        self._config.scheduling.scheduler == "slurm"
        and self._config.scheduling.settings
        and self._config.scheduling.settings.munge_key_secret_arn
    ):
        policy.extend(
            [
                iam.PolicyStatement(
                    sid="SecretsManager",
                    actions=["secretsmanager:GetSecretValue"],
                    effect=iam.Effect.ALLOW,
                    resources=[self._config.scheduling.settings.munge_key_secret_arn],
                ),
            ]
        )
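    # With login node pools configured, allow describing their load balancer target groups and target health.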
    if self._config.login_nodes:
        policy.extend(
            [
                iam.PolicyStatement(
                    sid="TargetGroupDescribe",
                    actions=[
                        "elasticloadbalancing:DescribeTargetGroups",
                        "elasticloadbalancing:DescribeTargetHealth",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ),
            ]
        )
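    # For all schedulers except AWS Batch, the head node manages the compute fleet directly.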
    if self._config.scheduling.scheduler != "awsbatch":
        policy.extend(
            [
                iam.PolicyStatement(
                    sid="EC2Terminate",
                    actions=["ec2:TerminateInstances"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    conditions={
                        "StringEquals": {f"ec2:ResourceTag/{PCLUSTER_CLUSTER_NAME_TAG}": Stack.of(self).stack_name}
                    },
                ),
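                # Launch compute nodes, scoped to the cluster's subnets, AMIs, key pair, and related resources.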
                iam.PolicyStatement(
                    sid="EC2RunInstancesCreateFleet",
                    actions=["ec2:RunInstances", "ec2:CreateFleet"],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        self._format_arn(service="ec2", resource=f"subnet/{subnet_id}")
                        for subnet_id in self._config.compute_subnet_ids
                    ]
                    + [
                        self._format_arn(service="ec2", resource="fleet/*"),
                        self._format_arn(service="ec2", resource="network-interface/*"),
                        self._format_arn(service="ec2", resource="instance/*"),
                        self._format_arn(service="ec2", resource="volume/*"),
                        self._format_arn(service="ec2", resource=f"key-pair/{self._config.head_node.ssh.key_name}"),
                        self._format_arn(service="ec2", resource="security-group/*"),
                        self._format_arn(service="ec2", resource="launch-template/*"),
                        self._format_arn(service="ec2", resource="placement-group/*"),
                    ]
                    + [
                        self._format_arn(service="ec2", resource=f"image/{queue_ami}", account="")
                        for _, queue_ami in self._config.image_dict.items()
                    ],
                ),
                iam.PolicyStatement(
                    sid="EC2DescribeCapacityReservations",
                    actions=["ec2:DescribeCapacityReservations"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ),
                iam.PolicyStatement(
                    sid="PassRole",
                    actions=["iam:PassRole"],
                    effect=iam.Effect.ALLOW,
                    resources=self._generate_head_node_pass_role_resources(),
                ),
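                # Read and update items in the cluster's DynamoDB table.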
                iam.PolicyStatement(
                    sid="DynamoDBTable",
                    actions=[
                        "dynamodb:UpdateItem",
                        "dynamodb:PutItem",
                        "dynamodb:GetItem",
                        "dynamodb:BatchGetItem",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        self._format_arn(
                            service="dynamodb",
                            resource=f"table/{PCLUSTER_DYNAMODB_PREFIX}{Stack.of(self).stack_name}",
                        )
                    ],
                ),
            ]
        )
        self._add_compute_console_output_policy_statement(policy)
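        # Capacity Reservations: allow launching into targeted ODCRs or reservation resource groups, if configured.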
        capacity_reservation_ids = self._config.capacity_reservation_ids
        if capacity_reservation_ids:
            policy.append(
                iam.PolicyStatement(
                    sid="AllowRunningReservedCapacity",
                    actions=["ec2:RunInstances"],
                    effect=iam.Effect.ALLOW,
                    resources=self._config.capacity_reservation_arns,
                )
            )
        capacity_reservation_resource_group_arns = self._config.capacity_reservation_resource_group_arns
        if capacity_reservation_resource_group_arns:
            policy.extend(
                [
                    iam.PolicyStatement(
                        sid="AllowManagingReservedCapacity",
                        actions=["ec2:RunInstances", "ec2:CreateFleet", "resource-groups:ListGroupResources"],
                        effect=iam.Effect.ALLOW,
                        resources=capacity_reservation_resource_group_arns,
                    )
                ]
            )
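    # Directory service integration: read the directory password from Secrets Manager or an SSM parameter.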
    if self._config.directory_service:
        password_secret_arn = Arn.split(
            self._config.directory_service.password_secret_arn, ArnFormat.COLON_RESOURCE_NAME
        )
        policy.append(
            iam.PolicyStatement(
                sid="AllowGettingDirectorySecretValue",
                actions=[
                    (
                        "secretsmanager:GetSecretValue"
                        if password_secret_arn.service == "secretsmanager"
                        else "ssm:GetParameter" if password_secret_arn.service == "ssm" else None
                    )
                ],
                effect=iam.Effect.ALLOW,
                resources=[self._config.directory_service.password_secret_arn],
            )
        )
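    # Slurm accounting: read the password secret of the external database, when one is configured.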
    if self._config.scheduling.scheduler == "slurm" and self._config.scheduling.settings.database:
        policy.append(
            iam.PolicyStatement(
                sid="AllowGettingSlurmDbSecretValue",
                actions=["secretsmanager:GetSecretValue"],
                effect=iam.Effect.ALLOW,
                resources=[self._config.scheduling.settings.database.password_secret_arn],
            )
        )
    return policy