def partition()

in graphlearn_torch/python/distributed/dist_table_dataset.py


  def partition(self):
    r""" Partition graph and feature data into different parts along with all
    other distributed partitioners, save the result of the current partition
    index into output directory.
    """
    if self.data_cls == 'hetero':
      node_pb_dict = {}
      node_feat_dict = {}
      for ntype in self.node_types:
        node_pb = self._partition_node(ntype)
        node_pb_dict[ntype] = node_pb

        current_node_feat_part = self._partition_node_feat(node_pb, ntype)
        if current_node_feat_part is not None:
          node_feat_dict[ntype] = current_node_feat_part

      edge_pb_dict = {}
      graph_dict = {}
      edge_feat_dict = {}
      for etype in self.edge_types:
        current_graph_part, edge_pb = self._partition_graph(node_pb_dict, etype)
        edge_pb_dict[etype] = edge_pb
        graph_dict[etype] = current_graph_part

        current_edge_feat_part = self._partition_edge_feat(edge_pb, etype)
        if current_edge_feat_part is not None:
          edge_feat_dict[etype] = current_edge_feat_part

      return (
        self.num_parts, self.current_partition_idx,
        graph_dict, node_feat_dict, edge_feat_dict, node_pb_dict, edge_pb_dict
      )
    else:
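      # Homogeneous case: a single node set and edge set to partition.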
      node_pb = self._partition_node()
      node_feat = self._partition_node_feat(node_pb)
      graph, edge_pb = self._partition_graph(node_pb)
      edge_feat = self._partition_edge_feat(edge_pb)

      return (
        self.num_parts, self.current_partition_idx,
        graph, node_feat, edge_feat, node_pb, edge_pb
      )
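
For reference, the returned 7-tuple unpacks identically in both branches; only the container types differ (per-type dicts for heterogeneous data, single objects otherwise). Below is a minimal sketch of a consumer, assuming only the return shape shown above; the function name and the print statements are illustrative, not part of this file:

  def consume_partition(partitioner):
    # partition() returns a 7-tuple; for heterogeneous data the last
    # five entries are dicts keyed by node type or edge type.
    (num_parts, part_idx,
     graph, node_feat, edge_feat, node_pb, edge_pb) = partitioner.partition()

    if isinstance(graph, dict):
      # Heterogeneous: one partitioned graph per edge type.
      for etype, graph_part in graph.items():
        print(f'part {part_idx}/{num_parts}, edge type {etype}: {graph_part}')
    else:
      # Homogeneous: a single partitioned graph for this partition index.
      print(f'part {part_idx}/{num_parts}: {graph}')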