def __init__()

in sample_workloads/megatron-gke/docker/monitor_collectives.py


  def __init__(
      self, name, message_size, group=None, peer_rank=None, root_rank=None):
    # Name of the collective being traced (e.g. 'all_reduce').
    self.name = name
    # Rank of this process in the default process group.
    self.rank = torch.distributed.get_rank()
    # File and line of the caller that issued the collective.
    self.source_line = _get_call_source_line()
    # Size of the collective's message payload.
    self.message_size = message_size
    # CUDA device the collective runs on.
    self.device = torch.cuda.current_device()
    # Record optional attributes only when they apply to this collective.
    if group is not None:
      self.group_ranks = torch.distributed.get_process_group_ranks(group=group)
    if peer_rank is not None:
      self.peer_rank = peer_rank
    if root_rank is not None:
      self.root_rank = root_rank
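
A minimal sketch of how this constructor might be invoked when intercepting a
collective. The wrapper name traced_all_reduce, the record class name
CollectiveMeta, and the byte-size computation are illustrative assumptions,
not code from monitor_collectives.py:

  import torch
  import torch.distributed

  def traced_all_reduce(tensor, group=None):
    # Hypothetical wrapper: build the metadata record, then dispatch the
    # real collective. message_size here is the tensor payload in bytes.
    meta = CollectiveMeta(  # assumed name of the class defining __init__ above
        name='all_reduce',
        message_size=tensor.element_size() * tensor.numel(),
        group=group)
    torch.distributed.all_reduce(tensor, group=group)
    return meta

Collectives with a single remote endpoint (e.g. send/recv) would pass
peer_rank instead, and rooted collectives (e.g. broadcast) would pass
root_rank, so only the attributes relevant to each operation are recorded.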