def share_ipc()

in graphlearn_torch/python/data/feature.py [0:0]


  def share_ipc(self):
    r""" Create an ipc handle for multiprocessing.

    Returns:
      Either the pre-existing handle (when ``self._ipc_handle`` is already
      set), or a 7-tuple of ``(feature_tensor, id2index, split_ratio,
      device_group_list, cuda_ipc_handle_dict, with_gpu, dtype)`` that a
      child process can use to reconstruct this feature.
    """
    # Fast path: a handle already exists, hand it back unchanged.
    if self._ipc_handle is not None:
      return self._ipc_handle

    # Move a tensor-based id mapping into shared CPU memory so that child
    # processes can access it without a copy. (isinstance is False for
    # None, so a separate None check is unnecessary.)
    if isinstance(self.id2index, torch.Tensor):
      self.id2index = self.id2index.cpu()
      self.id2index.share_memory_()

    # Lazily build the per-group CUDA ipc handles from the unified tensors.
    if self._cuda_ipc_handle_dict is None:
      self._cuda_ipc_handle_dict = {
        group_id: unified.share_ipc()[0]
        for group_id, unified in self._unified_tensors.items()
      }

    return (
      self.feature_tensor,
      self.id2index,
      self.split_ratio,
      self.device_group_list,
      self._cuda_ipc_handle_dict,
      self.with_gpu,
      self.dtype,
    )