def role_to_pod()

in torchx/schedulers/kubernetes_scheduler.py
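
Builds a single Kubernetes pod spec for one replica of a TorchX Role: the Role's resource requirements are translated into container requests/limits, its entrypoint, args, env, and port_map populate the container, and the pod is created with restart_policy="Never" and the Istio sidecar annotation disabled.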


def role_to_pod(name: str, role: Role) -> "V1Pod":
    from kubernetes.client.models import (  # noqa: F811 redefinition of unused
        V1Pod,
        V1PodSpec,
        V1Container,
        V1EnvVar,
        V1ResourceRequirements,
        V1ContainerPort,
        V1ObjectMeta,
    )

    # Translate the Role's resource spec into Kubernetes quantities:
    # CPU in millicores, memory in megabytes ("M"), and GPUs as the
    # nvidia.com/gpu extended resource.
    requests = {}

    resource = role.resource
    if resource.cpu >= 0:
        requests["cpu"] = f"{int(resource.cpu * 1000)}m"
    if resource.memMB >= 0:
        requests["memory"] = f"{int(resource.memMB)}M"
    if resource.gpu >= 0:
        requests["nvidia.com/gpu"] = str(resource.gpu)

    # The same quantities are used for both requests and limits.
    resources = V1ResourceRequirements(
        limits=requests,
        requests=requests,
    )

    # One container per replica: the Role's entrypoint/args become the
    # container command, and env / port_map are mapped onto V1EnvVar and
    # V1ContainerPort entries.
    container = V1Container(
        command=[role.entrypoint] + role.args,
        image=role.image,
        name=name,
        env=[
            V1EnvVar(
                name=name,
                value=value,
            )
            for name, value in role.env.items()
        ],
        resources=resources,
        ports=[
            V1ContainerPort(
                name=name,
                container_port=port,
            )
            for name, port in role.port_map.items()
        ],
    )
    return V1Pod(
        spec=V1PodSpec(
            containers=[container],
            # restartPolicy=Never: the kubelet does not restart the container
            # once it exits, whether it succeeded or failed.
            restart_policy="Never",
        ),
        metadata=V1ObjectMeta(
            annotations={
                # Disable the istio sidecar as it prevents the containers from
                # exiting once finished.
                ANNOTATION_ISTIO_SIDECAR: "false",
            },
            labels={},
        ),
    )
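
A minimal usage sketch (not part of kubernetes_scheduler.py): it assumes the Role and Resource dataclasses exported by torchx.specs, and every concrete value below (role name, image, args, ports) is made up for illustration.

from kubernetes.client import ApiClient

from torchx.specs import Resource, Role

# Hypothetical role describing a single trainer replica; all values are
# illustrative, not defaults defined by the scheduler.
role = Role(
    name="trainer",
    image="example.com/my-trainer:latest",
    entrypoint="python",
    args=["-m", "train", "--epochs", "10"],
    env={"LOGLEVEL": "INFO"},
    resource=Resource(cpu=2, gpu=1, memMB=4096),
    port_map={"tensorboard": 6006},
)

pod = role_to_pod("trainer-0", role)

# The returned V1Pod is a regular kubernetes-client model, so it can be
# converted to a plain dict (e.g. for dumping to YAML) with the client helper.
pod_dict = ApiClient().sanitize_for_serialization(pod)
print(pod_dict["spec"]["containers"][0]["resources"])
# both "limits" and "requests" come out as:
# {'cpu': '2000m', 'memory': '4096M', 'nvidia.com/gpu': '1'}

Because requests and limits are set to the same values, the resulting pod falls into the Guaranteed QoS class; the nvidia.com/gpu request additionally requires the NVIDIA device plugin to be installed on the cluster.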