adanet/core/iteration.py [967:977]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        increment_step_op = None
      else:
        with tf.control_dependencies([spec.train_op.train_op]):
          increment_step_op = spec.step.assign_add(1)
      # TPU also supports uneven training, but up to num_iterations_per_loop.
      training_hooks.append(
          _TrainingLimitHook(
              train_manager,
              spec,
              self._max_steps,
              increment_step_op=increment_step_op))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
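
The `tf.control_dependencies([spec.train_op.train_op])` block above is what ties the
per-subnetwork step counter to the actual training update: fetching
`increment_step_op` forces the train op to run first, so the counter only advances
on steps where training really happened. A minimal, self-contained sketch of that
pattern follows (illustrative only, not adanet code; `step`, `weight`, `loss`, and
`train_op` are toy stand-ins for `spec.step` and `spec.train_op.train_op`):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Toy stand-ins for spec.step and spec.train_op.train_op.
step = tf.Variable(0, dtype=tf.int64, trainable=False, name="step")
weight = tf.Variable(1.0)
loss = tf.square(weight - 3.0)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Because of the control dependency, running increment_step_op always runs
# train_op first, then bumps the counter by one.
with tf.control_dependencies([train_op]):
  increment_step_op = step.assign_add(1)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(5):
    sess.run(increment_step_op)
  print(sess.run(step))  # 5: one increment per completed training step.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -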



adanet/core/iteration.py [986:995]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        increment_step_op = None
      else:
        with tf.control_dependencies([spec.train_op.train_op]):
          increment_step_op = spec.step.assign_add(1)
      training_hooks.append(
          _TrainingLimitHook(
              train_manager,
              spec,
              self._max_steps,
              increment_step_op=increment_step_op))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
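
The hook appended in both excerpts is what enforces `self._max_steps` for a
subnetwork. Below is a hypothetical, simplified stand-in for `_TrainingLimitHook`
(the real class also coordinates with `train_manager` and `spec`, which are
omitted here): it fetches the chained `increment_step_op` alongside each training
step and requests a stop once the counter reaches the limit. Everything beyond
what the excerpts show is an assumption for illustration.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import tensorflow.compat.v1 as tf


class TrainingLimitHookSketch(tf.train.SessionRunHook):
  """Hypothetical sketch: stop a subnetwork's training at a step limit."""

  def __init__(self, max_steps, increment_step_op=None):
    self._max_steps = max_steps
    self._increment_step_op = increment_step_op

  def before_run(self, run_context):
    # Fetch the increment op with every training step. Its control dependency
    # on the train op means the counter only advances after the update ran.
    if self._increment_step_op is not None:
      return tf.train.SessionRunArgs(self._increment_step_op)
    return None

  def after_run(self, run_context, run_values):
    # request_stop() ends the MonitoredSession training loop at the limit.
    if (self._increment_step_op is not None and
        self._max_steps is not None and
        run_values.results >= self._max_steps):
      run_context.request_stop()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
On TPU (per the first excerpt's comment), `increment_step_op` is None and stepping
is instead bounded by `num_iterations_per_loop`, so this simplified sketch would
do nothing in that case.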



