def __init__()

in graphlearn_torch/python/data/feature.py


  def __init__(self,
               feature_tensor: TensorDataType,
               id2index: Optional[Union[torch.Tensor, Sequence]] = None,
               split_ratio: float = 0.0,
               device_group_list: Optional[List[DeviceGroup]] = None,
               device: Optional[int] = None,
               with_gpu: Optional[bool] = True,
               dtype: torch.dtype = torch.float32):
    self.feature_tensor = convert_to_tensor(feature_tensor, dtype)
    self.id2index = convert_to_tensor(id2index, dtype=torch.int64)
    self.split_ratio = float(split_ratio)
    self.device_group_list = device_group_list
    self.device = device
    self.with_gpu = with_gpu
    self.dtype = dtype

    self._device2group = {}
    self._unified_tensors = {}
    self._cuda_id2index = None
    self._ipc_handle = None
    self._cuda_ipc_handle_dict = None

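    # Keep a CPU copy of the features in shared memory so that other worker
    # processes can access them without extra copies.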
    if self.feature_tensor is not None:
      self.feature_tensor = share_memory(self.feature_tensor.cpu())

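    # GPU lookup enabled: if no device groups were given, default to one
    # single-device group per visible GPU.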
    if self.with_gpu:
      if self.device_group_list is None:
        self.device_group_list = [
          DeviceGroup(i, [i]) for i in range(torch.cuda.device_count())]

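      # All device groups must have the same size; record the group id of
      # each cuda device.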
      group_size = self.device_group_list[0].size
      for dg in self.device_group_list:
        assert group_size == dg.size
        for d in dg.device_list:
          self._device2group[d] = dg.group_id
      if self.feature_tensor is not None:
        self._split_and_init()
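
A minimal usage sketch of this constructor. The values and shapes are illustrative, the import path assumes that Feature and DeviceGroup are re-exported from graphlearn_torch.data, and split_ratio is an arbitrary choice whose exact split semantics are defined by _split_and_init:

  import torch
  from graphlearn_torch.data import DeviceGroup, Feature

  # 10,000 nodes with 128-dim float features (illustrative sizes).
  raw_feat = torch.randn(10_000, 128)
  # Identity mapping from global node id to row index in raw_feat.
  id2index = torch.arange(10_000)

  feature = Feature(
    feature_tensor=raw_feat,
    id2index=id2index,
    split_ratio=0.2,                          # consumed by _split_and_init
    device_group_list=[DeviceGroup(0, [0])],  # one group containing cuda device 0
    device=0,
    with_gpu=torch.cuda.is_available(),
  )

When with_gpu is False, the GPU-related arguments are only stored and the `if self.with_gpu:` branch above is skipped entirely.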