research/gam/gam/trainer/trainer_classification.py [578:702]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            self.labels_ll_left: labels_src,
            self.labels_ll_right: labels_tgt
        })
      if pair_lu_iterator is not None:
        indices_src, indices_tgt, features_src, features_tgt, labels_src, _ = (
            next(pair_lu_iterator))
        feed_dict.update({
            self.indices_lu_left: indices_src,
            self.indices_lu_right: indices_tgt,
            self.features_lu_left: features_src,
            self.features_lu_right: features_tgt,
            self.labels_lu_left: labels_src
        })
      if pair_uu_iterator is not None:
        indices_src, indices_tgt, features_src, features_tgt, _, _ = next(
            pair_uu_iterator)
        feed_dict.update({
            self.indices_uu_left: indices_src,
            self.indices_uu_right: indices_tgt,
            self.features_uu_left: features_src,
            self.features_uu_right: features_tgt
        })
      return feed_dict
    except StopIteration:
      # If the iterator has finished, return None.
      return None

  def pair_iterator(self, src_indices, tgt_indices, batch_size, data):
    """Iterator over pairs of samples.

    The first element of the pair is selected from the src_indices, and the
    second element is selected from tgt_indices.

    Args:
      src_indices: Numpy array containing the indices available for the source
        node of each pair.
      tgt_indices: Numpy array containing the indices available for the target
        node of each pair.
      batch_size: An integer representing the desired batch size.
      data: A CotrainDataset object used to extract the features and labels.

    Yields:
      indices_src, indices_tgt, features_src, features_tgt, labels_src,
      labels_tgt
    """

    def _select_from_pool(indices):
      """Selects batch_size indices from the provided list."""
      num_indices = len(indices)
      if num_indices > 0:
        idxs = self.rng.randint(0, high=num_indices, size=(batch_size,))
        indices_batch = indices[idxs]
        features_batch = data.get_features(indices_batch)
        labels_batch = data.get_labels(indices_batch)
      else:
        features_shape = [0] + list(data.features_shape)
        indices_batch = np.zeros(shape=(0,), dtype=np.int64)
        features_batch = np.zeros(shape=features_shape, dtype=np.float32)
        labels_batch = np.zeros(shape=(0,), dtype=np.int64)
      return indices_batch, features_batch, labels_batch

    while True:
      indices_src, features_src, labels_src = _select_from_pool(src_indices)
      indices_tgt, features_tgt, labels_tgt = _select_from_pool(tgt_indices)
      yield (indices_src, indices_tgt, features_src, features_tgt, labels_src,
             labels_tgt)

  def edge_iterator(self, data, batch_size, labeling):
    """An iterator over graph edges.

    Args:
      data: A CotrainDataset object used to extract the features and labels.
      batch_size: An integer representing the desired batch size.
      labeling: A string that can be `ll`, `lu` or `uu`, representing the type
        of edges to return, where `ll` refers to labeled-labeled, `lu` refers
        to labeled-unlabeled, and `uu` refers to unlabeled-unlabeled.

    Yields:
      indices_src, indices_tgt, features_src, features_tgt, labels_src,
      labels_tgt
    """
    if labeling == 'll':
      edges = data.get_edges(
          src_labeled=True, tgt_labeled=True, label_must_match=True)
    elif labeling == 'lu':
      edges_lu = data.get_edges(src_labeled=True, tgt_labeled=False)
      edges_ul = data.get_edges(src_labeled=False, tgt_labeled=True)
      # Reverse the edges of UL to be LU.
      edges_ul = [e.copy(src=e.tgt, tgt=e.src) for e in edges_ul]
      edges = edges_lu + edges_ul
    elif labeling == 'uu':
      edges = data.get_edges(src_labeled=False, tgt_labeled=False)
    else:
      raise ValueError('Unsupported value for parameter `labeling`.')

    if not edges:
      # There are no edges of the requested type, so yield empty batches
      # forever.
      indices = np.zeros(shape=(0,), dtype=np.int32)
      features = np.zeros(
          shape=[0] + list(data.features_shape), dtype=np.float32)
      labels = np.zeros(shape=(0,), dtype=np.int64)
      while True:
        yield (indices, indices, features, features, labels, labels)

    edges = np.stack([(e.src, e.tgt) for e in edges])
    iterator = batch_iterator(
        inputs=edges,
        batch_size=batch_size,
        shuffle=True,
        allow_smaller_batch=False,
        repeat=True)

    for edge_batch in iterator:
      # Each batch is an array of shape (batch_size, 2) of (src, tgt) pairs.
      indices_src = edge_batch[:, 0]
      indices_tgt = edge_batch[:, 1]
      features_src = data.get_features(indices_src)
      features_tgt = data.get_features(indices_tgt)
      labels_src = data.get_labels(indices_src)
      labels_tgt = data.get_labels(indices_tgt)
      yield (indices_src, indices_tgt, features_src, features_tgt, labels_src,
             labels_tgt)

  def _evaluate(self, indices, split, session, summary_writer):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
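
Both trainers consume these helpers the same way: `_select_from_pool` draws a
fixed-size batch of node indices with replacement from a pool, and
`pair_iterator` pairs one such batch from the source pool with one from the
target pool, looking up features and labels through the CotrainDataset. The
following is a minimal, self-contained sketch of that sampling pattern;
`_ToyDataset` and every concrete value in it are invented for illustration and
are not part of the repository.

import numpy as np


class _ToyDataset(object):
  """Hypothetical stand-in for CotrainDataset, defined only for this sketch."""

  def __init__(self, features, labels):
    self._features = features
    self._labels = labels
    self.features_shape = features.shape[1:]

  def get_features(self, indices):
    return self._features[indices]

  def get_labels(self, indices):
    return self._labels[indices]


rng = np.random.RandomState(seed=42)
data = _ToyDataset(
    features=rng.randn(10, 3).astype(np.float32),
    labels=rng.randint(0, 2, size=(10,)))
labeled_idx = np.array([0, 1, 2, 3])
unlabeled_idx = np.array([4, 5, 6, 7, 8, 9])
batch_size = 5

# Mirror _select_from_pool: sample batch_size indices with replacement from
# each pool, then look up the corresponding features and labels.
src = labeled_idx[rng.randint(
    0, high=len(labeled_idx), size=(batch_size,))]
tgt = unlabeled_idx[rng.randint(
    0, high=len(unlabeled_idx), size=(batch_size,))]
features_src = data.get_features(src)
features_tgt = data.get_features(tgt)
labels_src = data.get_labels(src)
# (src, tgt, features_src, features_tgt, labels_src) is one labeled-unlabeled
# batch, matching what pair_iterator yields and what the feed_dict above maps
# onto the *_lu_* placeholders.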



research/gam/gam/trainer/trainer_classification_gcn.py [606:730]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            self.labels_ll_left: labels_src,
            self.labels_ll_right: labels_tgt
        })
      if pair_lu_iterator is not None:
        indices_src, indices_tgt, features_src, features_tgt, labels_src, _ = (
            next(pair_lu_iterator))
        feed_dict.update({
            self.indices_lu_left: indices_src,
            self.indices_lu_right: indices_tgt,
            self.features_lu_left: features_src,
            self.features_lu_right: features_tgt,
            self.labels_lu_left: labels_src
        })
      if pair_uu_iterator is not None:
        indices_src, indices_tgt, features_src, features_tgt, _, _ = next(
            pair_uu_iterator)
        feed_dict.update({
            self.indices_uu_left: indices_src,
            self.indices_uu_right: indices_tgt,
            self.features_uu_left: features_src,
            self.features_uu_right: features_tgt
        })
      return feed_dict
    except StopIteration:
      # If the iterator has finished, return None.
      return None

  def pair_iterator(self, src_indices, tgt_indices, batch_size, data):
    """Iterator over pairs of samples.

    The first element of the pair is selected from the src_indices, and the
    second element is selected from tgt_indices.

    Args:
      src_indices: Numpy array containing the indices available for the source
        node of each pair.
      tgt_indices: Numpy array containing the indices available for the target
        node of each pair.
      batch_size: An integer representing the desired batch size.
      data: A CotrainDataset object used to extract the features and labels.

    Yields:
      indices_src, indices_tgt, features_src, features_tgt, labels_src,
      labels_tgt
    """

    def _select_from_pool(indices):
      """Selects batch_size indices from the provided list."""
      num_indices = len(indices)
      if num_indices > 0:
        idxs = self.rng.randint(0, high=num_indices, size=(batch_size,))
        indices_batch = indices[idxs]
        features_batch = data.get_features(indices_batch)
        labels_batch = data.get_labels(indices_batch)
      else:
        features_shape = [0] + list(data.features_shape)
        indices_batch = np.zeros(shape=(0,), dtype=np.int64)
        features_batch = np.zeros(shape=features_shape, dtype=np.float32)
        labels_batch = np.zeros(shape=(0,), dtype=np.int64)
      return indices_batch, features_batch, labels_batch

    while True:
      indices_src, features_src, labels_src = _select_from_pool(src_indices)
      indices_tgt, features_tgt, labels_tgt = _select_from_pool(tgt_indices)
      yield (indices_src, indices_tgt, features_src, features_tgt, labels_src,
             labels_tgt)

  def edge_iterator(self, data, batch_size, labeling):
    """An iterator over graph edges.

    Args:
      data: A CotrainDataset object used to extract the features and labels.
      batch_size: An integer representing the desired batch size.
      labeling: A string that can be `ll`, `lu` or `uu`, representing the type
        of edges to return, where `ll` refers to labeled-labeled, `lu` refers
        to labeled-unlabeled, and `uu` refers to unlabeled-unlabeled.

    Yields:
      indices_src, indices_tgt, features_src, features_tgt, labels_src,
      labels_tgt
    """
    if labeling == 'll':
      edges = data.get_edges(
          src_labeled=True, tgt_labeled=True, label_must_match=True)
    elif labeling == 'lu':
      edges_lu = data.get_edges(src_labeled=True, tgt_labeled=False)
      edges_ul = data.get_edges(src_labeled=False, tgt_labeled=True)
      # Reverse the edges of UL to be LU.
      edges_ul = [e.copy(src=e.tgt, tgt=e.src) for e in edges_ul]
      edges = edges_lu + edges_ul
    elif labeling == 'uu':
      edges = data.get_edges(src_labeled=False, tgt_labeled=False)
    else:
      raise ValueError('Unsupported value for parameter `labeling`.')

    if not edges:
      # There are no edges of the requested type, so yield empty batches
      # forever.
      indices = np.zeros(shape=(0,), dtype=np.int32)
      features = np.zeros(
          shape=[0] + list(data.features_shape), dtype=np.float32)
      labels = np.zeros(shape=(0,), dtype=np.int64)
      while True:
        yield (indices, indices, features, features, labels, labels)

    edges = np.stack([(e.src, e.tgt) for e in edges])
    iterator = batch_iterator(
        inputs=edges,
        batch_size=batch_size,
        shuffle=True,
        allow_smaller_batch=False,
        repeat=True)

    for edge_batch in iterator:
      # Each batch is an array of shape (batch_size, 2) of (src, tgt) pairs.
      indices_src = edge_batch[:, 0]
      indices_tgt = edge_batch[:, 1]
      features_src = data.get_features(indices_src)
      features_tgt = data.get_features(indices_tgt)
      labels_src = data.get_labels(indices_src)
      labels_tgt = data.get_labels(indices_tgt)
      yield (indices_src, indices_tgt, features_src, features_tgt, labels_src,
             labels_tgt)

  def _evaluate(self, indices, split, session, summary_writer):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
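
`edge_iterator` delegates batching to a `batch_iterator` helper that is
defined elsewhere in the repository and is not shown in either excerpt. The
sketch below is an assumption-based reimplementation of only the contract the
call relies on (shuffled, fixed-size, endlessly repeating batches, with
trailing partial batches dropped when `allow_smaller_batch=False`); it is not
the project's own implementation.

import numpy as np


def batch_iterator_sketch(inputs, batch_size, shuffle=True,
                          allow_smaller_batch=False, repeat=True, seed=0):
  """Hypothetical stand-in for the batch_iterator helper used above.

  Yields slices of `inputs` of size batch_size, reshuffling the rows on every
  pass and cycling indefinitely when repeat=True. Trailing batches smaller
  than batch_size are dropped unless allow_smaller_batch is set, matching how
  edge_iterator calls the real helper.
  """
  rng = np.random.RandomState(seed)
  num_rows = len(inputs)
  while True:
    order = rng.permutation(num_rows) if shuffle else np.arange(num_rows)
    for start in range(0, num_rows, batch_size):
      batch = inputs[order[start:start + batch_size]]
      if len(batch) < batch_size and not allow_smaller_batch:
        break
      yield batch
    if not repeat:
      return


# Example: shuffled batches of two (src, tgt) edge pairs at a time.
edges = np.array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
batches = batch_iterator_sketch(edges, batch_size=2)
first = next(batches)  # An array of shape (2, 2).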



