tensorflow_privacy/privacy/estimators/v1/head.py [183:224]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    'class_ids': class_ids,
                    'weights': weights,
                    'unreduced_loss': unreduced_loss,
                    'regularization_loss': regularization_loss
                }))

      # Train.
      if optimizer is not None:
        if train_op_fn is not None:
          raise ValueError('train_op_fn and optimizer cannot both be set.')
        train_op = optimizer.minimize(
            regularized_training_loss,
            global_step=tf.compat.v1.train.get_global_step())
      elif train_op_fn is not None:
        train_op = train_op_fn(regularized_training_loss)
      else:
        raise ValueError('train_op_fn and optimizer cannot both be None.')
      train_op = _append_update_ops(train_op)
      # Only summarize mean_loss for SUM reduction to preserve backwards
      # compatibility. Otherwise skip it to avoid unnecessary computation.
      if self._loss_reduction == tf.compat.v1.losses.Reduction.SUM:
        example_weight_sum = tf.math.reduce_sum(
            weights * tf.compat.v1.ones_like(unreduced_loss))
        mean_loss = training_loss / example_weight_sum
      else:
        mean_loss = None
    with tf.compat.v1.name_scope(''):
      keys = metric_keys.MetricKeys
      tf.compat.v1.summary.scalar(
          _summary_key(self._name, keys.LOSS), scalar_loss)
      if mean_loss is not None:
        tf.compat.v1.summary.scalar(
            _summary_key(self._name, keys.LOSS_MEAN), mean_loss)
      if regularization_loss is not None:
        tf.compat.v1.summary.scalar(
            _summary_key(self._name, keys.LOSS_REGULARIZATION),
            regularization_loss)
    return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
        mode=ModeKeys.TRAIN,
        predictions=predictions,
        loss=scalar_loss,
        train_op=train_op)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
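
Both duplicated regions implement the same train branch: exactly one of
optimizer or train_op_fn may be supplied, and whichever is present builds the
training op against the global step. Below is a minimal standalone sketch of
that selection logic, using only tf.compat.v1 calls and a hypothetical
resolve_train_op helper that is not part of head.py:

import tensorflow as tf


def resolve_train_op(loss, optimizer=None, train_op_fn=None):
  """Mirrors the mutual-exclusion check in the duplicated train branches."""
  if optimizer is not None:
    if train_op_fn is not None:
      raise ValueError('train_op_fn and optimizer cannot both be set.')
    # Same call shape as head.py: minimize the regularized loss and advance
    # the global step.
    return optimizer.minimize(
        loss, global_step=tf.compat.v1.train.get_global_step())
  if train_op_fn is not None:
    return train_op_fn(loss)
  raise ValueError('train_op_fn and optimizer cannot both be None.')

# Example usage inside a graph-mode model_fn (assumed context):
#   opt = tf.compat.v1.train.GradientDescentOptimizer(0.1)
#   train_op = resolve_train_op(regularized_training_loss, optimizer=opt)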



tensorflow_privacy/privacy/estimators/v1/head.py [374:415]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    'class_ids': class_ids,
                    'weights': weights,
                    'unreduced_loss': unreduced_loss,
                    'regularization_loss': regularization_loss
                }))

      # Train.
      if optimizer is not None:
        if train_op_fn is not None:
          raise ValueError('train_op_fn and optimizer cannot both be set.')
        train_op = optimizer.minimize(
            regularized_training_loss,
            global_step=tf.compat.v1.train.get_global_step())
      elif train_op_fn is not None:
        train_op = train_op_fn(regularized_training_loss)
      else:
        raise ValueError('train_op_fn and optimizer cannot both be None.')
      train_op = _append_update_ops(train_op)
      # Only summarize mean_loss for SUM reduction to preserve backwards
      # compatibility. Otherwise skip it to avoid unnecessary computation.
      if self._loss_reduction == tf.compat.v1.losses.Reduction.SUM:
        example_weight_sum = tf.math.reduce_sum(
            weights * tf.compat.v1.ones_like(unreduced_loss))
        mean_loss = training_loss / example_weight_sum
      else:
        mean_loss = None
    with tf.compat.v1.name_scope(''):
      keys = metric_keys.MetricKeys
      tf.compat.v1.summary.scalar(
          _summary_key(self._name, keys.LOSS), scalar_loss)
      if mean_loss is not None:
        tf.compat.v1.summary.scalar(
            _summary_key(self._name, keys.LOSS_MEAN), mean_loss)
      if regularization_loss is not None:
        tf.compat.v1.summary.scalar(
            _summary_key(self._name, keys.LOSS_REGULARIZATION),
            regularization_loss)
    return model_fn._TPUEstimatorSpec(  # pylint: disable=protected-access
        mode=ModeKeys.TRAIN,
        predictions=predictions,
        loss=scalar_loss,
        train_op=train_op)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
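
For the SUM loss reduction, both regions also recover a mean loss for the
LOSS_MEAN summary by dividing the summed training loss by the total example
weight. A small numeric sketch with hypothetical per-example values shows the
arithmetic:

import tensorflow as tf

# Hypothetical per-example losses and weights, for illustration only.
unreduced_loss = tf.constant([2.0, 4.0, 6.0])
weights = tf.constant([1.0, 1.0, 2.0])

# Under Reduction.SUM the training loss is the weighted sum of example losses.
training_loss = tf.math.reduce_sum(weights * unreduced_loss)  # 2 + 4 + 12 = 18

# Same computation as head.py: broadcast the weights against the unreduced
# loss to get the total example weight, then divide.
example_weight_sum = tf.math.reduce_sum(
    weights * tf.ones_like(unreduced_loss))                   # 1 + 1 + 2 = 4
mean_loss = training_loss / example_weight_sum                # 18 / 4 = 4.5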



