research/gam/gam/trainer/trainer_classification.py [289:352]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))

    # Create TensorBoard summaries.
    if self.enable_summaries:
      summaries = [
          tf.summary.scalar('loss_supervised', loss_supervised),
          tf.summary.scalar('loss_agr', loss_agr),
          tf.summary.scalar('loss_reg', loss_reg),
          tf.summary.scalar('loss_total', loss_op)
      ]
      self.summary_op = tf.summary.merge(summaries)

    # Create learning rate schedule and optimizer.
    self.global_step = tf.train.get_or_create_global_step()
    if self.lr_decay_steps is not None and self.lr_decay_rate is not None:
      self.lr = tf.train.exponential_decay(
          self.lr_initial,
          self.global_step,
          self.lr_decay_steps,
          self.lr_decay_rate,
          staircase=True)
      self.optimizer = optimizer(self.lr)
    else:
      self.optimizer = optimizer(lr_initial)

    # Get trainable variables and compute gradients.
    grads_and_vars = self.optimizer.compute_gradients(
        loss_op,
        tf.trainable_variables(scope=tf.get_default_graph().get_name_scope()))
    # Clip gradients.
    if self.gradient_clip:
      gradients, variables = zip(*grads_and_vars)
      gradients, _ = tf.clip_by_global_norm(gradients, self.gradient_clip)
      grads_and_vars = list(zip(gradients, variables))
    with tf.control_dependencies(
        tf.get_collection(
            tf.GraphKeys.UPDATE_OPS,
            scope=tf.get_default_graph().get_name_scope())):
      train_op = self.optimizer.apply_gradients(
          grads_and_vars, global_step=self.global_step)

    # Create a saver for model variables.
    trainable_vars = [v for _, v in grads_and_vars]

    # Put together the subset of variables to save, and later restore, at the
    # best validation accuracy reached within one cotrain round.
    vars_to_save = list(trainable_vars)
    if isinstance(weight_decay_var, tf.Variable):
      vars_to_save.append(weight_decay_var)
    saver = tf.train.Saver(vars_to_save)

    # Put together all variables that need to be saved in case the process is
    # interrupted and needs to be restarted.
    self.vars_to_save = [iter_cls_total, self.global_step]
    if isinstance(weight_decay_var, tf.Variable):
      self.vars_to_save.append(weight_decay_var)
    if self.warm_start:
      self.vars_to_save.extend(self.variables)

    # More variables to be initialized after the session is created.
    self.is_initialized = False

    self.rng = np.random.RandomState(seed)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
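
The block above follows a common TF 1.x recipe: an exponentially decayed learning rate driven by the global step, gradients computed only over the variables in the current name scope, optional clipping by global norm, and applying the gradients under a control dependency on UPDATE_OPS so that ops such as batch-norm moving-average updates run on every training step. Below is a minimal standalone sketch of that recipe, assuming the tensorflow.compat.v1 API; the toy model, placeholders, and hyperparameter values are illustrative only and not taken from the GAM code.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# Toy model and loss, just to have something to differentiate.
x = tf.placeholder(tf.float32, [None, 4])
y = tf.placeholder(tf.float32, [None, 1])
w = tf.get_variable('w', [4, 1])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

# Exponential learning-rate decay driven by the global step.
global_step = tf.train.get_or_create_global_step()
lr = tf.train.exponential_decay(
    0.01, global_step, decay_steps=100, decay_rate=0.96, staircase=True)
optimizer = tf.train.AdamOptimizer(lr)

# Compute gradients, clip them by global norm, and apply them after any
# pending UPDATE_OPS (e.g. batch-norm statistics updates) have run.
grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables())
gradients, variables = zip(*grads_and_vars)
gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
  train_op = optimizer.apply_gradients(
      list(zip(gradients, variables)), global_step=global_step)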



research/gam/gam/trainer/trainer_classification_gcn.py [322:385]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))

    # Create TensorBoard summaries.
    if self.enable_summaries:
      summaries = [
          tf.summary.scalar('loss_supervised', loss_supervised),
          tf.summary.scalar('loss_agr', loss_agr),
          tf.summary.scalar('loss_reg', loss_reg),
          tf.summary.scalar('loss_total', loss_op)
      ]
      self.summary_op = tf.summary.merge(summaries)

    # Create learning rate schedule and optimizer.
    self.global_step = tf.train.get_or_create_global_step()
    if self.lr_decay_steps is not None and self.lr_decay_rate is not None:
      self.lr = tf.train.exponential_decay(
          self.lr_initial,
          self.global_step,
          self.lr_decay_steps,
          self.lr_decay_rate,
          staircase=True)
      self.optimizer = optimizer(self.lr)
    else:
      self.optimizer = optimizer(lr_initial)

    # Get trainable variables and compute gradients.
    grads_and_vars = self.optimizer.compute_gradients(
        loss_op,
        tf.trainable_variables(scope=tf.get_default_graph().get_name_scope()))
    # Clip gradients.
    if self.gradient_clip:
      gradients, variables = zip(*grads_and_vars)
      gradients, _ = tf.clip_by_global_norm(gradients, self.gradient_clip)
      grads_and_vars = list(zip(gradients, variables))
    with tf.control_dependencies(
        tf.get_collection(
            tf.GraphKeys.UPDATE_OPS,
            scope=tf.get_default_graph().get_name_scope())):
      train_op = self.optimizer.apply_gradients(
          grads_and_vars, global_step=self.global_step)

    # Create a saver for model variables.
    trainable_vars = [v for _, v in grads_and_vars]

    # Put together the subset of variables to save, and later restore, at the
    # best validation accuracy reached within one cotrain round.
    vars_to_save = list(trainable_vars)
    if isinstance(weight_decay_var, tf.Variable):
      vars_to_save.append(weight_decay_var)
    saver = tf.train.Saver(vars_to_save)

    # Put together all variables that need to be saved in case the process is
    # interrupted and needs to be restarted.
    self.vars_to_save = [iter_cls_total, self.global_step]
    if isinstance(weight_decay_var, tf.Variable):
      self.vars_to_save.append(weight_decay_var)
    if self.warm_start:
      self.vars_to_save.extend(self.variables)

    # More variables to be initialized after the session is created.
    self.is_initialized = False

    self.rng = np.random.RandomState(seed)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
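
Both trainers also keep two sets of variables for checkpointing: a tf.train.Saver over the model's trainable variables (plus the weight-decay variable, when it is a tf.Variable) used to roll back to the best validation accuracy, and a second list, self.vars_to_save, holding the bookkeeping state needed to resume an interrupted run. Below is a minimal sketch of the same two-saver pattern, again assuming the tensorflow.compat.v1 API; the variable names and checkpoint path are illustrative only.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

w = tf.get_variable('w', [4, 1])
weight_decay = tf.get_variable(
    'weight_decay', shape=[], trainable=False,
    initializer=tf.constant_initializer(1e-4))
global_step = tf.train.get_or_create_global_step()

# Saver for the subset of variables restored at the best validation accuracy.
best_saver = tf.train.Saver([w, weight_decay])
# Saver for everything needed to resume an interrupted run.
resume_saver = tf.train.Saver([w, weight_decay, global_step])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  best_path = best_saver.save(sess, '/tmp/best_model.ckpt')
  # ... train, and when validation accuracy degrades, roll back:
  best_saver.restore(sess, best_path)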



