in tensorflow_recommenders/tasks/ranking.py
def call(self,
         labels: tf.Tensor,
         predictions: tf.Tensor,
         sample_weight: Optional[tf.Tensor] = None,
         training: bool = False,
         compute_metrics: bool = True) -> tf.Tensor:
  """Computes the task loss and metrics.

  Args:
    labels: Tensor of labels.
    predictions: Tensor of predictions.
    sample_weight: Tensor of sample weights.
    training: Indicator of whether the training or test loss is being computed.
    compute_metrics: Whether to compute metrics. Set this to False during
      training for faster training.

  Returns:
    loss: Tensor of loss values.
  """

  loss = self._loss(
      y_true=labels, y_pred=predictions, sample_weight=sample_weight)

  if not compute_metrics:
    return loss
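
  # Collect the update ops returned by each metric's update_state() call so
  # that the returned loss can be made to depend on them below.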
  update_ops = []

  for metric in self._ranking_metrics:
    update_ops.append(metric.update_state(
        y_true=labels, y_pred=predictions, sample_weight=sample_weight))

  for metric in self._prediction_metrics:
    update_ops.append(
        metric.update_state(predictions, sample_weight=sample_weight))

  for metric in self._label_metrics:
    update_ops.append(
        metric.update_state(labels, sample_weight=sample_weight))

  for metric in self._loss_metrics:
    update_ops.append(
        metric.update_state(loss, sample_weight=sample_weight))

  # Custom metrics may not return update ops, unlike built-in Keras metrics.
  update_ops = [x for x in update_ops if x is not None]
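
  # Gating the returned loss on the metric updates ensures that, in graph
  # mode, the updates are executed whenever the loss is evaluated.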
  with tf.control_dependencies(update_ops):
    return tf.identity(loss)
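
For context, a minimal usage sketch (not part of ranking.py): a tfrs.models.Model
subclass typically constructs a Ranking task and returns the output of the call()
method above from its compute_loss(). The scoring tower and the feature/label
names ("user_rating") below are assumptions for illustration only.

import tensorflow as tf
import tensorflow_recommenders as tfrs


class HypotheticalRankingModel(tfrs.models.Model):
  """Toy TFRS model that delegates loss and metric handling to Ranking.call."""

  def __init__(self, scoring_model: tf.keras.Model):
    super().__init__()
    # Hypothetical tower that maps raw features to a rating prediction.
    self.scoring_model = scoring_model
    # The task wraps a pointwise loss plus metrics; its call() is shown above.
    self.task = tfrs.tasks.Ranking(
        loss=tf.keras.losses.MeanSquaredError(),
        metrics=[tf.keras.metrics.RootMeanSquaredError()])

  def compute_loss(self, features, training: bool = False) -> tf.Tensor:
    labels = features.pop("user_rating")
    predictions = self.scoring_model(features)
    # Skipping metric updates while training trades metric reporting for
    # speed, as the compute_metrics docstring above suggests.
    return self.task(
        labels=labels,
        predictions=predictions,
        compute_metrics=not training)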