research/gam/gam/trainer/trainer_agreement.py [220:247]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    self.global_step = tf.train.get_or_create_global_step()
    # Decay the learning rate exponentially when both decay parameters are
    # configured; otherwise keep it constant at its initial value.
    if self.lr_decay_steps is not None and self.lr_decay_rate is not None:
      self.lr = tf.train.exponential_decay(
          self.lr_initial,
          self.global_step,
          self.lr_decay_steps,
          self.lr_decay_rate,
          staircase=True)
      self.optimizer = optimizer(self.lr)
    else:
      self.optimizer = optimizer(self.lr_initial)

    # Create train op: compute gradients of the loss w.r.t. the trainable
    # variables in the current name scope.
    grads_and_vars = self.optimizer.compute_gradients(
        loss_op,
        tf.trainable_variables(scope=tf.get_default_graph().get_name_scope()))
    # Clip gradients by their global norm to stabilize training.
    if self.gradient_clip:
      gradients, variables = zip(*grads_and_vars)
      gradients, _ = tf.clip_by_global_norm(gradients, self.gradient_clip)
      grads_and_vars = list(zip(gradients, variables))
    # Run the ops registered in UPDATE_OPS (e.g. batch norm moving-average
    # updates) before applying the gradients.
    with tf.control_dependencies(
        tf.get_collection(
            tf.GraphKeys.UPDATE_OPS,
            scope=tf.get_default_graph().get_name_scope())):
      train_op = self.optimizer.apply_gradients(
          grads_and_vars, global_step=self.global_step)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
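
Both excerpts wrap apply_gradients in a control dependency on the UPDATE_OPS
collection. Without it, ops that update internal statistics (most notably
batch normalization's moving mean and variance) would never run, because they
do not lie on the loss's gradient path. Below is a minimal TF 1.x sketch of
that failure mode; the toy model, layer sizes, and learning rate are
illustrative only, not taken from the repo:

    import tensorflow as tf  # TF 1.x graph-mode API

    # Toy graph: batch_normalization registers its moving-average update ops
    # in the UPDATE_OPS collection rather than on the gradient path.
    x = tf.placeholder(tf.float32, [None, 4])
    h = tf.layers.batch_normalization(tf.layers.dense(x, 8), training=True)
    loss = tf.reduce_mean(tf.square(h))

    optimizer = tf.train.AdamOptimizer(1e-3)
    # Without this dependency the moving mean/variance stay frozen at their
    # initial values even though the loss keeps decreasing during training.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
      train_op = optimizer.minimize(loss)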



research/gam/gam/trainer/trainer_classification_gcn.py [335:362]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    self.global_step = tf.train.get_or_create_global_step()
    # Decay the learning rate exponentially when both decay parameters are
    # configured; otherwise keep it constant at its initial value.
    if self.lr_decay_steps is not None and self.lr_decay_rate is not None:
      self.lr = tf.train.exponential_decay(
          self.lr_initial,
          self.global_step,
          self.lr_decay_steps,
          self.lr_decay_rate,
          staircase=True)
      self.optimizer = optimizer(self.lr)
    else:
      self.optimizer = optimizer(self.lr_initial)

    # Get the trainable variables in the current name scope and compute
    # gradients of the loss with respect to them.
    grads_and_vars = self.optimizer.compute_gradients(
        loss_op,
        tf.trainable_variables(scope=tf.get_default_graph().get_name_scope()))
    # Clip gradients by their global norm to stabilize training.
    if self.gradient_clip:
      gradients, variables = zip(*grads_and_vars)
      gradients, _ = tf.clip_by_global_norm(gradients, self.gradient_clip)
      grads_and_vars = list(zip(gradients, variables))
    # Run the ops registered in UPDATE_OPS (e.g. batch norm moving-average
    # updates) before applying the gradients.
    with tf.control_dependencies(
        tf.get_collection(
            tf.GraphKeys.UPDATE_OPS,
            scope=tf.get_default_graph().get_name_scope())):
      train_op = self.optimizer.apply_gradients(
          grads_and_vars, global_step=self.global_step)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
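
The two excerpts are identical apart from one comment, so the setup could be
consolidated into a single helper shared by both trainers. A minimal sketch of
such a helper, assuming a TF 1.x graph context; the name create_train_op, its
argument list, and its home module are hypothetical, not part of the repo:

    import tensorflow as tf  # TF 1.x graph-mode API

    def create_train_op(loss_op, optimizer, lr_initial, lr_decay_steps=None,
                        lr_decay_rate=None, gradient_clip=None):
      """Builds a train op with optional LR decay and gradient clipping."""
      global_step = tf.train.get_or_create_global_step()
      if lr_decay_steps is not None and lr_decay_rate is not None:
        lr = tf.train.exponential_decay(
            lr_initial, global_step, lr_decay_steps, lr_decay_rate,
            staircase=True)
      else:
        lr = lr_initial
      opt = optimizer(lr)
      scope = tf.get_default_graph().get_name_scope()
      grads_and_vars = opt.compute_gradients(
          loss_op, tf.trainable_variables(scope=scope))
      if gradient_clip:
        gradients, variables = zip(*grads_and_vars)
        gradients, _ = tf.clip_by_global_norm(gradients, gradient_clip)
        grads_and_vars = list(zip(gradients, variables))
      with tf.control_dependencies(
          tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=scope)):
        return opt.apply_gradients(grads_and_vars, global_step=global_step)

Each trainer could then build its train op from inside its own name scope:

    train_op = create_train_op(loss_op, optimizer, self.lr_initial,
                               self.lr_decay_steps, self.lr_decay_rate,
                               self.gradient_clip)

Note the sketch returns only the train op; both trainers also keep the decayed
learning rate and the optimizer on self, so a faithful refactoring would need
the helper to return those handles as well.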



