torchbiggraph/partitionserver.py [44:54]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if not td.is_available():
        raise RuntimeError(
            "The installed PyTorch version doesn't provide "
            "distributed training capabilities."
        )
    ranks = ProcessRanks.from_num_invocations(
        config.num_machines, config.num_partition_servers
    )

    num_ps_groups = config.num_groups_for_partition_server
    groups: List[List[int]] = [ranks.trainers]  # barrier group
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
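This excerpt guards the distributed setup: it verifies that torch.distributed (imported as td) is available, derives the process ranks from the machine and partition-server counts, and seeds the group list with the trainers' barrier group. Below is a minimal sketch of how such rank lists are typically turned into torch.distributed process groups; the backend, init_method, and the helper's name and call site are assumptions, not TorchBigGraph's actual wiring.

    # Minimal sketch, assuming the "gloo" backend and a TCP init_method;
    # the surrounding call site inside TorchBigGraph may differ.
    import torch.distributed as td

    def make_process_groups(groups, rank, world_size,
                            init_method="tcp://127.0.0.1:29500"):
        # The global group is initialized first; every process must join it.
        td.init_process_group(
            backend="gloo",
            init_method=init_method,
            world_size=world_size,
            rank=rank,
        )
        # new_group() must be called by all processes, in the same order,
        # even by processes that are not members of a given group.
        return [td.new_group(ranks=g) for g in groups]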



torchbiggraph/train_cpu.py [310:320]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            if not td.is_available():
                raise RuntimeError(
                    "The installed PyTorch version doesn't provide "
                    "distributed training capabilities."
                )
            ranks = ProcessRanks.from_num_invocations(
                config.num_machines, config.num_partition_servers
            )

            num_ps_groups = config.num_groups_for_partition_server
            groups: List[List[int]] = [ranks.trainers]  # barrier group
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
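The two excerpts above are identical. A hypothetical way to deduplicate them is a small shared helper; the function name, import path, and return shape below are assumptions, and only the body is taken from the excerpts.

    # Hypothetical shared helper; name, location, and return type are assumptions.
    from typing import List, Tuple

    import torch.distributed as td

    from torchbiggraph.distributed import ProcessRanks  # assumed import path


    def setup_ranks_and_groups(config) -> Tuple["ProcessRanks", int, List[List[int]]]:
        """Capture the availability check and group seeding shared by
        partitionserver.py and train_cpu.py."""
        if not td.is_available():
            raise RuntimeError(
                "The installed PyTorch version doesn't provide "
                "distributed training capabilities."
            )
        ranks = ProcessRanks.from_num_invocations(
            config.num_machines, config.num_partition_servers
        )
        num_ps_groups = config.num_groups_for_partition_server
        groups: List[List[int]] = [ranks.trainers]  # barrier group
        return ranks, num_ps_groups, groups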



